path: root/net/ipv4/tcp_input.c
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--  net/ipv4/tcp_input.c  96
1 file changed, 48 insertions, 48 deletions
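
The change below replaces the __NET_INC_STATS()/__TCP_INC_STATS() calls in
tcp_input.c with their NET_INC_STATS()/TCP_INC_STATS() counterparts. The
double-underscore forms expand to the non-preempt-safe per-CPU increment and
therefore assume the caller already runs with preemption or BHs disabled; the
plain forms are also safe from process context. A rough sketch of how the two
families relate, paraphrased from include/net/snmp.h and include/net/ip.h
around this kernel version (illustrative, not verbatim):

/* Per-CPU SNMP counter helpers (simplified). */
#define __SNMP_INC_STATS(mib, field)	\
	__this_cpu_inc(mib->mibs[field])	/* caller must have preemption/BHs off */

#define SNMP_INC_STATS(mib, field)	\
	this_cpu_inc(mib->mibs[field])		/* safe in any context */

/* The netstat wrappers just select the per-namespace mib array. */
#define __NET_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
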
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1fb19c91e091..ac85fb42a5a2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -869,7 +869,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 		else
 			mib_idx = LINUX_MIB_TCPSACKREORDER;
 
-		__NET_INC_STATS(sock_net(sk), mib_idx);
+		NET_INC_STATS(sock_net(sk), mib_idx);
 #if FASTRETRANS_DEBUG > 1
 		pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
 			 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
@@ -1062,7 +1062,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
 	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
 		dup_sack = true;
 		tcp_dsack_seen(tp);
-		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
 	} else if (num_sacks > 1) {
 		u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
 		u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
@@ -1071,7 +1071,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
 		    !before(start_seq_0, start_seq_1)) {
 			dup_sack = true;
 			tcp_dsack_seen(tp);
-			__NET_INC_STATS(sock_net(sk),
+			NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPDSACKOFORECV);
 		}
 	}
@@ -1289,7 +1289,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 
 	if (skb->len > 0) {
 		BUG_ON(!tcp_skb_pcount(skb));
-		__NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTED);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTED);
 		return false;
 	}
 
@@ -1314,7 +1314,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 	tcp_unlink_write_queue(skb, sk);
 	sk_wmem_free_skb(sk, skb);
 
-	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKMERGED);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKMERGED);
 
 	return true;
 }
@@ -1473,7 +1473,7 @@ noop:
 	return skb;
 
 fallback:
-	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
 	return NULL;
 }
 
@@ -1661,7 +1661,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 				mib_idx = LINUX_MIB_TCPSACKDISCARD;
 			}
 
-			__NET_INC_STATS(sock_net(sk), mib_idx);
+			NET_INC_STATS(sock_net(sk), mib_idx);
 			if (i == 0)
 				first_sack_index = -1;
 			continue;
@@ -1913,7 +1913,7 @@ void tcp_enter_loss(struct sock *sk)
 	skb = tcp_write_queue_head(sk);
 	is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED);
 	if (is_reneg) {
-		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
 		tp->sacked_out = 0;
 		tp->fackets_out = 0;
 	}
@@ -2399,7 +2399,7 @@ static bool tcp_try_undo_recovery(struct sock *sk)
 		else
 			mib_idx = LINUX_MIB_TCPFULLUNDO;
 
-		__NET_INC_STATS(sock_net(sk), mib_idx);
+		NET_INC_STATS(sock_net(sk), mib_idx);
 	}
 	if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
 		/* Hold old state until something *above* high_seq
@@ -2421,7 +2421,7 @@ static bool tcp_try_undo_dsack(struct sock *sk)
 	if (tp->undo_marker && !tp->undo_retrans) {
 		DBGUNDO(sk, "D-SACK");
 		tcp_undo_cwnd_reduction(sk, false);
-		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
 		return true;
 	}
 	return false;
@@ -2436,9 +2436,9 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
 		tcp_undo_cwnd_reduction(sk, true);
 
 		DBGUNDO(sk, "partial loss");
-		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
 		if (frto_undo)
-			__NET_INC_STATS(sock_net(sk),
+			NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPSPURIOUSRTOS);
 		inet_csk(sk)->icsk_retransmits = 0;
 		if (frto_undo || tcp_is_sack(tp))
@@ -2563,7 +2563,7 @@ static void tcp_mtup_probe_failed(struct sock *sk)
 
 	icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
 	icsk->icsk_mtup.probe_size = 0;
-	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
 }
 
 static void tcp_mtup_probe_success(struct sock *sk)
@@ -2583,7 +2583,7 @@ static void tcp_mtup_probe_success(struct sock *sk)
 	icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
 	icsk->icsk_mtup.probe_size = 0;
 	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
-	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
 }
 
 /* Do a simple retransmit without using the backoff mechanisms in
@@ -2647,7 +2647,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 	else
 		mib_idx = LINUX_MIB_TCPSACKRECOVERY;
 
-	__NET_INC_STATS(sock_net(sk), mib_idx);
+	NET_INC_STATS(sock_net(sk), mib_idx);
 
 	tp->prior_ssthresh = 0;
 	tcp_init_undo(tp);
@@ -2740,7 +2740,7 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked)
 
 		DBGUNDO(sk, "partial recovery");
 		tcp_undo_cwnd_reduction(sk, true);
-		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
 		tcp_try_keep_open(sk);
 		return true;
 	}
@@ -3434,7 +3434,7 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
 		s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
 
 		if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
-			__NET_INC_STATS(net, mib_idx);
+			NET_INC_STATS(net, mib_idx);
 			return true;	/* rate-limited: don't send yet! */
 		}
 	}
@@ -3467,7 +3467,7 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
 		challenge_count = 0;
 	}
 	if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
-		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
 		tcp_send_ack(sk);
 	}
 }
@@ -3516,7 +3516,7 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
 		tcp_set_ca_state(sk, TCP_CA_CWR);
 		tcp_end_cwnd_reduction(sk);
 		tcp_try_keep_open(sk);
-		__NET_INC_STATS(sock_net(sk),
+		NET_INC_STATS(sock_net(sk),
 				LINUX_MIB_TCPLOSSPROBERECOVERY);
 	} else if (!(flag & (FLAG_SND_UNA_ADVANCED |
 			     FLAG_NOT_DUP | FLAG_DATA_SACKED))) {
@@ -3621,14 +3621,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
 		tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE);
 
-		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS);
 	} else {
 		u32 ack_ev_flags = CA_ACK_SLOWPATH;
 
 		if (ack_seq != TCP_SKB_CB(skb)->end_seq)
 			flag |= FLAG_DATA;
 		else
-			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPUREACKS);
+			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPUREACKS);
 
 		flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
 
@@ -4131,7 +4131,7 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
 		else
 			mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
 
-		__NET_INC_STATS(sock_net(sk), mib_idx);
+		NET_INC_STATS(sock_net(sk), mib_idx);
 
 		tp->rx_opt.dsack = 1;
 		tp->duplicate_sack[0].start_seq = seq;
@@ -4155,7 +4155,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
 
 	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
 		tcp_enter_quickack_mode(sk);
 
 		if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
@@ -4305,7 +4305,7 @@ static bool tcp_try_coalesce(struct sock *sk,
 
 	atomic_add(delta, &sk->sk_rmem_alloc);
 	sk_mem_charge(sk, delta);
-	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
 	TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
 	TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
 	TCP_SKB_CB(to)->tcp_flags |= TCP_SKB_CB(from)->tcp_flags;
@@ -4393,7 +4393,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 	tcp_ecn_check_ce(tp, skb);
 
 	if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
-		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
 		tcp_drop(sk, skb);
 		return;
 	}
@@ -4402,7 +4402,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 	tp->pred_flags = 0;
 	inet_csk_schedule_ack(sk);
 
-	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
 	SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
 		   tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
@@ -4457,7 +4457,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 	if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
 		if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
 			/* All the bits are present. Drop. */
-			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
+			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
 			tcp_drop(sk, skb);
 			skb = NULL;
 			tcp_dsack_set(sk, seq, end_seq);
@@ -4496,7 +4496,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 		__skb_unlink(skb1, &tp->out_of_order_queue);
 		tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
 				 TCP_SKB_CB(skb1)->end_seq);
-		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
 		tcp_drop(sk, skb1);
 	}
 
@@ -4661,7 +4661,7 @@ queue_and_out:
 
 	if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
 		/* A retransmit, 2nd most common case. Force an immediate ack. */
-		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
 		tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
 out_of_window:
@@ -4707,7 +4707,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
 
 	__skb_unlink(skb, list);
 	__kfree_skb(skb);
-	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
 
 	return next;
 }
@@ -4866,7 +4866,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
 	bool res = false;
 
 	if (!skb_queue_empty(&tp->out_of_order_queue)) {
-		__NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
 		__skb_queue_purge(&tp->out_of_order_queue);
 
 		/* Reset SACK state. A conforming SACK implementation will
@@ -4895,7 +4895,7 @@ static int tcp_prune_queue(struct sock *sk)
 
 	SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
 
-	__NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED);
 
 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		tcp_clamp_window(sk);
@@ -4925,7 +4925,7 @@ static int tcp_prune_queue(struct sock *sk)
 	 * drop receive data on the floor. It will get retransmitted
 	 * and hopefully then we'll have sufficient space.
 	 */
-	__NET_INC_STATS(sock_net(sk), LINUX_MIB_RCVPRUNED);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_RCVPRUNED);
 
 	/* Massive buffer overcommit. */
 	tp->pred_flags = 0;
@@ -5184,7 +5184,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
 	if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
 	    tcp_paws_discard(sk, skb)) {
 		if (!th->rst) {
-			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
+			NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
 			if (!tcp_oow_rate_limited(sock_net(sk), skb,
 						  LINUX_MIB_TCPACKSKIPPEDPAWS,
 						  &tp->last_oow_ack_time))
@@ -5236,8 +5236,8 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
 	if (th->syn) {
 syn_challenge:
 		if (syn_inerr)
-			__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
-		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
+			TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
 		tcp_send_challenge_ack(sk, skb);
 		goto discard;
 	}
@@ -5352,7 +5352,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			tcp_data_snd_check(sk);
 			return;
 		} else { /* Header too small */
-			__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
+			TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
 			goto discard;
 		}
 	} else {
@@ -5380,7 +5380,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
 				__skb_pull(skb, tcp_header_len);
 				tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
-				__NET_INC_STATS(sock_net(sk),
+				NET_INC_STATS(sock_net(sk),
 						LINUX_MIB_TCPHPHITSTOUSER);
 				eaten = 1;
 			}
@@ -5403,7 +5403,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
 			tcp_rcv_rtt_measure_ts(sk, skb);
 
-			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);
+			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
 			/* Bulk data transfer: receiver */
 			eaten = tcp_queue_rcv(sk, skb, tcp_header_len,
@@ -5460,8 +5460,8 @@ step5:
 	return;
 
 csum_error:
-	__TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
-	__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
+	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
+	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
 
 discard:
 	tcp_drop(sk, skb);
@@ -5553,13 +5553,13 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
 			break;
 		}
 		tcp_rearm_rto(sk);
-		__NET_INC_STATS(sock_net(sk),
+		NET_INC_STATS(sock_net(sk),
 				LINUX_MIB_TCPFASTOPENACTIVEFAIL);
 		return true;
 	}
 	tp->syn_data_acked = tp->syn_data;
 	if (tp->syn_data_acked)
-		__NET_INC_STATS(sock_net(sk),
+		NET_INC_STATS(sock_net(sk),
 				LINUX_MIB_TCPFASTOPENACTIVE);
 
 	tcp_fastopen_add_skb(sk, synack);
@@ -5595,7 +5595,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 		if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
 		    !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
 			     tcp_time_stamp)) {
-			__NET_INC_STATS(sock_net(sk),
+			NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_PAWSACTIVEREJECTED);
 			goto reset_and_undo;
 		}
@@ -5965,7 +5965,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 		    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 		     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
 			tcp_done(sk);
-			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
+			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 			return 1;
 		}
 
@@ -6022,7 +6022,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 		if (sk->sk_shutdown & RCV_SHUTDOWN) {
 			if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 			    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
-				__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
+				NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 				tcp_reset(sk);
 				return 1;
 			}
@@ -6224,7 +6224,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 	 * timeout.
 	 */
 	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
-		__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 		goto drop;
 	}
 
@@ -6271,7 +6271,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 		if (dst && strict &&
 		    !tcp_peer_is_proven(req, dst, true,
 					tmp_opt.saw_tstamp)) {
-			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
+			NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
 			goto drop_and_release;
 		}
 	}
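
For context on why the calling convention matters (illustrative only;
example_count_dup_ack() is a made-up helper, not part of this diff):
this_cpu_inc() remains correct even if the task is preempted and migrated to
another CPU mid-update, while __this_cpu_inc() is only safe once preemption is
already disabled, e.g. in softirq context.

/* Hypothetical helper, for illustration only. */
static void example_count_dup_ack(struct sock *sk)
{
	/* Process context with preemption enabled: use the plain form. */
	NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);

	/*
	 * A caller known to run in softirq (BHs disabled) could use the
	 * cheaper double-underscore form instead:
	 *
	 *	__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
	 */
}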