path: root/net/ipv4/tcp_input.c
author:    Pavel Emelyanov <xemul@openvz.org>  2008-07-16 23:31:16 -0400
committer: David S. Miller <davem@davemloft.net>  2008-07-16 23:31:16 -0400
commit:    de0744af1fe2d0a3d428f6af0f2fe1f6179b1a9c (patch)
tree:      68d02820b1aa13e8fa9743c0ece5930a13d5a205 /net/ipv4/tcp_input.c
parent:    4e6734447dbc7a0a85e09616821c0782d9fb1141 (diff)
mib: add net to NET_INC_STATS_BH
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--  net/ipv4/tcp_input.c  65
1 file changed, 33 insertions(+), 32 deletions(-)
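This patch is one step in a series that threads the owning network namespace through the NET_* SNMP accounting macros: every call site gains a struct net * argument, derived here from the socket at hand via sock_net(sk). As a rough sketch of the transitional macro shape (illustrative only, not verbatim from this commit; the real definition lives in the ipv4 headers), the macro can accept and ignore the new argument so the tree keeps building while call sites are converted:

/*
 * Illustrative transitional definition (assumed shape, not this
 * commit's exact hunk): the macro takes the namespace but still
 * updates the global table, so converted and unconverted callers
 * coexist while the series lands.
 */
#define NET_INC_STATS_BH(net, field)					\
	do {								\
		(void)(net);	/* unused until counters are per-net */\
		SNMP_INC_STATS_BH(net_statistics, field);		\
	} while (0)

Once every caller passes a struct net, the macro body can switch from the global net_statistics table to a per-namespace mib without touching the call sites again.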
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index f50d8433f042..fac49a6e1611 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -961,7 +961,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 	else
 		mib_idx = LINUX_MIB_TCPSACKREORDER;
 
-	NET_INC_STATS_BH(mib_idx);
+	NET_INC_STATS_BH(sock_net(sk), mib_idx);
 #if FASTRETRANS_DEBUG > 1
 	printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
 	       tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
@@ -1157,7 +1157,7 @@ static void tcp_mark_lost_retrans(struct sock *sk)
 			tp->lost_out += tcp_skb_pcount(skb);
 			TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 		}
-		NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
 	} else {
 		if (before(ack_seq, new_low_seq))
 			new_low_seq = ack_seq;
@@ -1181,7 +1181,7 @@ static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
 	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
 		dup_sack = 1;
 		tcp_dsack_seen(tp);
-		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
 	} else if (num_sacks > 1) {
 		u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
 		u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
@@ -1190,7 +1190,8 @@ static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
 		    !before(start_seq_0, start_seq_1)) {
 			dup_sack = 1;
 			tcp_dsack_seen(tp);
-			NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
+			NET_INC_STATS_BH(sock_net(sk),
+					 LINUX_MIB_TCPDSACKOFORECV);
 		}
 	}
 
@@ -1476,7 +1477,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
 				mib_idx = LINUX_MIB_TCPSACKDISCARD;
 			}
 
-			NET_INC_STATS_BH(mib_idx);
+			NET_INC_STATS_BH(sock_net(sk), mib_idx);
 			if (i == 0)
 				first_sack_index = -1;
 			continue;
@@ -1969,7 +1970,7 @@ static int tcp_check_sack_reneging(struct sock *sk, int flag)
 {
 	if (flag & FLAG_SACK_RENEGING) {
 		struct inet_connection_sock *icsk = inet_csk(sk);
-		NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
 
 		tcp_enter_loss(sk, 1);
 		icsk->icsk_retransmits++;
@@ -2401,7 +2402,7 @@ static int tcp_try_undo_recovery(struct sock *sk)
 		else
 			mib_idx = LINUX_MIB_TCPFULLUNDO;
 
-		NET_INC_STATS_BH(mib_idx);
+		NET_INC_STATS_BH(sock_net(sk), mib_idx);
 		tp->undo_marker = 0;
 	}
 	if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
@@ -2424,7 +2425,7 @@ static void tcp_try_undo_dsack(struct sock *sk)
 		DBGUNDO(sk, "D-SACK");
 		tcp_undo_cwr(sk, 1);
 		tp->undo_marker = 0;
-		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
 	}
 }
 
@@ -2447,7 +2448,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
 
 		DBGUNDO(sk, "Hoe");
 		tcp_undo_cwr(sk, 0);
-		NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
 
 		/* So... Do not make Hoe's retransmit yet.
 		 * If the first packet was delayed, the rest
@@ -2476,7 +2477,7 @@ static int tcp_try_undo_loss(struct sock *sk)
 		DBGUNDO(sk, "partial loss");
 		tp->lost_out = 0;
 		tcp_undo_cwr(sk, 1);
-		NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
 		inet_csk(sk)->icsk_retransmits = 0;
 		tp->undo_marker = 0;
 		if (tcp_is_sack(tp))
@@ -2595,7 +2596,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 	    icsk->icsk_ca_state != TCP_CA_Open &&
 	    tp->fackets_out > tp->reordering) {
 		tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering);
-		NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSS);
 	}
 
 	/* D. Check consistency of the current state. */
@@ -2700,7 +2701,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 			else
 				mib_idx = LINUX_MIB_TCPSACKRECOVERY;
 
-			NET_INC_STATS_BH(mib_idx);
+			NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
 			tp->high_seq = tp->snd_nxt;
 			tp->prior_ssthresh = 0;
@@ -3211,7 +3212,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
 		}
 		tp->frto_counter = 0;
 		tp->undo_marker = 0;
-		NET_INC_STATS_BH(LINUX_MIB_TCPSPURIOUSRTOS);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS);
 	}
 	return 0;
 }
@@ -3264,12 +3265,12 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 
 		tcp_ca_event(sk, CA_EVENT_FAST_ACK);
 
-		NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS);
 	} else {
 		if (ack_seq != TCP_SKB_CB(skb)->end_seq)
 			flag |= FLAG_DATA;
 		else
-			NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS);
 
 		flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
 
@@ -3724,7 +3725,7 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
 		else
 			mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
 
-		NET_INC_STATS_BH(mib_idx);
+		NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
 		tp->rx_opt.dsack = 1;
 		tp->duplicate_sack[0].start_seq = seq;
@@ -3750,7 +3751,7 @@ static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
 
 	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
 		tcp_enter_quickack_mode(sk);
 
 		if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
@@ -4039,7 +4040,7 @@ queue_and_out:
 
 	if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
 		/* A retransmit, 2nd most common case.  Force an immediate ack. */
-		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
 		tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
 out_of_window:
@@ -4181,7 +4182,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 			struct sk_buff *next = skb->next;
 			__skb_unlink(skb, list);
 			__kfree_skb(skb);
-			NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
 			skb = next;
 			continue;
 		}
@@ -4249,7 +4250,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 			struct sk_buff *next = skb->next;
 			__skb_unlink(skb, list);
 			__kfree_skb(skb);
-			NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
 			skb = next;
 			if (skb == tail ||
 			    tcp_hdr(skb)->syn ||
@@ -4312,7 +4313,7 @@ static int tcp_prune_ofo_queue(struct sock *sk)
 	int res = 0;
 
 	if (!skb_queue_empty(&tp->out_of_order_queue)) {
-		NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
 		__skb_queue_purge(&tp->out_of_order_queue);
 
 		/* Reset SACK state.  A conforming SACK implementation will
@@ -4341,7 +4342,7 @@ static int tcp_prune_queue(struct sock *sk)
 
 	SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
 
-	NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED);
 
 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		tcp_clamp_window(sk);
@@ -4370,7 +4371,7 @@ static int tcp_prune_queue(struct sock *sk)
 	 * drop receive data on the floor.  It will get retransmitted
 	 * and hopefully then we'll have sufficient space.
 	 */
-	NET_INC_STATS_BH(LINUX_MIB_RCVPRUNED);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED);
 
 	/* Massive buffer overcommit. */
 	tp->pred_flags = 0;
@@ -4837,7 +4838,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
 				__skb_pull(skb, tcp_header_len);
 				tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-				NET_INC_STATS_BH(LINUX_MIB_TCPHPHITSTOUSER);
+				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER);
 			}
 			if (copied_early)
 				tcp_cleanup_rbuf(sk, skb->len);
@@ -4860,7 +4861,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			if ((int)skb->truesize > sk->sk_forward_alloc)
 				goto step5;
 
-			NET_INC_STATS_BH(LINUX_MIB_TCPHPHITS);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
 			/* Bulk data transfer: receiver */
 			__skb_pull(skb, tcp_header_len);
@@ -4904,7 +4905,7 @@ slow_path:
 	if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
 	    tcp_paws_discard(sk, skb)) {
 		if (!th->rst) {
-			NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
 			tcp_send_dupack(sk, skb);
 			goto discard;
 		}
@@ -4940,7 +4941,7 @@ slow_path:
 
 	if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
 		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
-		NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
 		tcp_reset(sk);
 		return 1;
 	}
@@ -4996,7 +4997,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 		if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
 		    !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
 			     tcp_time_stamp)) {
-			NET_INC_STATS_BH(LINUX_MIB_PAWSACTIVEREJECTED);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED);
 			goto reset_and_undo;
 		}
 
@@ -5280,7 +5281,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
 	    tcp_paws_discard(sk, skb)) {
 		if (!th->rst) {
-			NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
 			tcp_send_dupack(sk, skb);
 			goto discard;
 		}
@@ -5309,7 +5310,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	 * Check for a SYN in window.
 	 */
 	if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-		NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
 		tcp_reset(sk);
 		return 1;
 	}
@@ -5391,7 +5392,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 		     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
 			tcp_done(sk);
-			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
+			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 			return 1;
 		}
 
@@ -5451,7 +5452,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		if (sk->sk_shutdown & RCV_SHUTDOWN) {
 			if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 			    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
-				NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
+				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 				tcp_reset(sk);
 				return 1;
 			}