path: root/net/ipv4/tcp_input.c
author	Eric Dumazet <edumazet@google.com>	2016-04-27 19:44:39 -0400
committer	David S. Miller <davem@davemloft.net>	2016-04-27 22:48:24 -0400
commit	02a1d6e7a6bb025a77da77012190e1efc1970f1c (patch)
tree	79fdbbaa1812a45cff7148cdaca96685e2c1a287 /net/ipv4/tcp_input.c
parent	b15084ec7d4c89000242d69b5f57b4d138bad1b9 (diff)
net: rename NET_{ADD|INC}_STATS_BH()
Rename NET_INC_STATS_BH() to __NET_INC_STATS()
and NET_ADD_STATS_BH() to __NET_ADD_STATS().

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
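The rename is purely mechanical at each call site: the _BH suffix (which marked the variant expecting BH/softirq context to be handled by the caller) is replaced by a leading double underscore, and the SNMP counter being incremented is unchanged. A minimal before/after sketch using a call site from this file; the macro definition at the end is only an assumption about its usual shape in include/net/ip.h and is not part of this diff:

	/* old spelling, removed throughout net/ipv4/tcp_input.c */
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);

	/* new spelling introduced by this patch */
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);

/*
 * Assumed shape of the renamed helper (illustrative sketch only): the
 * "__" prefix keeps the convention that the caller is responsible for
 * the execution context, exactly as the old *_BH name did.
 */
#define __NET_INC_STATS(net, field) \
	__SNMP_INC_STATS((net)->mib.net_statistics, field)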
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	100
1 file changed, 52 insertions(+), 48 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index dad8d93262ed..0d5239c283cb 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -869,7 +869,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 		else
 			mib_idx = LINUX_MIB_TCPSACKREORDER;
 
-		NET_INC_STATS_BH(sock_net(sk), mib_idx);
+		__NET_INC_STATS(sock_net(sk), mib_idx);
 #if FASTRETRANS_DEBUG > 1
 		pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
 			 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
@@ -1062,7 +1062,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
 	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
 		dup_sack = true;
 		tcp_dsack_seen(tp);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
 	} else if (num_sacks > 1) {
 		u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
 		u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
@@ -1071,7 +1071,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
 		    !before(start_seq_0, start_seq_1)) {
 			dup_sack = true;
 			tcp_dsack_seen(tp);
-			NET_INC_STATS_BH(sock_net(sk),
+			__NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPDSACKOFORECV);
 		}
 	}
@@ -1289,7 +1289,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 
 	if (skb->len > 0) {
 		BUG_ON(!tcp_skb_pcount(skb));
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTED);
 		return false;
 	}
 
@@ -1313,7 +1313,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 	tcp_unlink_write_queue(skb, sk);
 	sk_wmem_free_skb(sk, skb);
 
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKMERGED);
 
 	return true;
 }
@@ -1469,7 +1469,7 @@ noop:
 	return skb;
 
 fallback:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
 	return NULL;
 }
 
@@ -1657,7 +1657,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 				mib_idx = LINUX_MIB_TCPSACKDISCARD;
 			}
 
-			NET_INC_STATS_BH(sock_net(sk), mib_idx);
+			__NET_INC_STATS(sock_net(sk), mib_idx);
 			if (i == 0)
 				first_sack_index = -1;
 			continue;
@@ -1909,7 +1909,7 @@ void tcp_enter_loss(struct sock *sk)
 	skb = tcp_write_queue_head(sk);
 	is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED);
 	if (is_reneg) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
 		tp->sacked_out = 0;
 		tp->fackets_out = 0;
 	}
@@ -2395,7 +2395,7 @@ static bool tcp_try_undo_recovery(struct sock *sk)
 		else
 			mib_idx = LINUX_MIB_TCPFULLUNDO;
 
-		NET_INC_STATS_BH(sock_net(sk), mib_idx);
+		__NET_INC_STATS(sock_net(sk), mib_idx);
 	}
 	if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
 		/* Hold old state until something *above* high_seq
@@ -2417,7 +2417,7 @@ static bool tcp_try_undo_dsack(struct sock *sk)
 	if (tp->undo_marker && !tp->undo_retrans) {
 		DBGUNDO(sk, "D-SACK");
 		tcp_undo_cwnd_reduction(sk, false);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
 		return true;
 	}
 	return false;
@@ -2432,10 +2432,10 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
 		tcp_undo_cwnd_reduction(sk, true);
 
 		DBGUNDO(sk, "partial loss");
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
 		if (frto_undo)
-			NET_INC_STATS_BH(sock_net(sk),
+			__NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPSPURIOUSRTOS);
 		inet_csk(sk)->icsk_retransmits = 0;
 		if (frto_undo || tcp_is_sack(tp))
 			tcp_set_ca_state(sk, TCP_CA_Open);
@@ -2559,7 +2559,7 @@ static void tcp_mtup_probe_failed(struct sock *sk)
 
 	icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
 	icsk->icsk_mtup.probe_size = 0;
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
 }
 
 static void tcp_mtup_probe_success(struct sock *sk)
@@ -2579,7 +2579,7 @@ static void tcp_mtup_probe_success(struct sock *sk)
 	icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
 	icsk->icsk_mtup.probe_size = 0;
 	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
 }
 
 /* Do a simple retransmit without using the backoff mechanisms in
@@ -2643,7 +2643,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 	else
 		mib_idx = LINUX_MIB_TCPSACKRECOVERY;
 
-	NET_INC_STATS_BH(sock_net(sk), mib_idx);
+	__NET_INC_STATS(sock_net(sk), mib_idx);
 
 	tp->prior_ssthresh = 0;
 	tcp_init_undo(tp);
@@ -2736,7 +2736,7 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked)
 
 		DBGUNDO(sk, "partial recovery");
 		tcp_undo_cwnd_reduction(sk, true);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
 		tcp_try_keep_open(sk);
 		return true;
 	}
@@ -3431,7 +3431,7 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
 		s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
 
 		if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
-			NET_INC_STATS_BH(net, mib_idx);
+			__NET_INC_STATS(net, mib_idx);
 			return true;	/* rate-limited: don't send yet! */
 		}
 	}
@@ -3464,7 +3464,7 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
 		challenge_count = 0;
 	}
 	if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
 		tcp_send_ack(sk);
 	}
 }
@@ -3513,8 +3513,8 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
 		tcp_set_ca_state(sk, TCP_CA_CWR);
 		tcp_end_cwnd_reduction(sk);
 		tcp_try_keep_open(sk);
-		NET_INC_STATS_BH(sock_net(sk),
+		__NET_INC_STATS(sock_net(sk),
 				LINUX_MIB_TCPLOSSPROBERECOVERY);
 	} else if (!(flag & (FLAG_SND_UNA_ADVANCED |
 			     FLAG_NOT_DUP | FLAG_DATA_SACKED))) {
 		/* Pure dupack: original and TLP probe arrived; no loss */
@@ -3618,14 +3618,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
 		tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE);
 
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS);
 	} else {
 		u32 ack_ev_flags = CA_ACK_SLOWPATH;
 
 		if (ack_seq != TCP_SKB_CB(skb)->end_seq)
 			flag |= FLAG_DATA;
 		else
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS);
+			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPUREACKS);
 
 		flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
 
@@ -4128,7 +4128,7 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
 		else
 			mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
 
-		NET_INC_STATS_BH(sock_net(sk), mib_idx);
+		__NET_INC_STATS(sock_net(sk), mib_idx);
 
 		tp->rx_opt.dsack = 1;
 		tp->duplicate_sack[0].start_seq = seq;
@@ -4152,7 +4152,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
 
 	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
 		tcp_enter_quickack_mode(sk);
 
 		if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
@@ -4302,7 +4302,7 @@ static bool tcp_try_coalesce(struct sock *sk,
 
 	atomic_add(delta, &sk->sk_rmem_alloc);
 	sk_mem_charge(sk, delta);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
 	TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
 	TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
 	TCP_SKB_CB(to)->tcp_flags |= TCP_SKB_CB(from)->tcp_flags;
@@ -4390,7 +4390,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 	tcp_ecn_check_ce(tp, skb);
 
 	if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
 		tcp_drop(sk, skb);
 		return;
 	}
@@ -4399,7 +4399,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 	tp->pred_flags = 0;
 	inet_csk_schedule_ack(sk);
 
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
 	SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
 		   tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
@@ -4454,7 +4454,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 	if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
 		if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
 			/* All the bits are present. Drop. */
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
+			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
 			tcp_drop(sk, skb);
 			skb = NULL;
 			tcp_dsack_set(sk, seq, end_seq);
@@ -4493,7 +4493,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 		__skb_unlink(skb1, &tp->out_of_order_queue);
 		tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
 				 TCP_SKB_CB(skb1)->end_seq);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
 		tcp_drop(sk, skb1);
 	}
 
@@ -4658,7 +4658,7 @@ queue_and_out:
 
 	if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
 		/* A retransmit, 2nd most common case. Force an immediate ack. */
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
 		tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
 out_of_window:
@@ -4704,7 +4704,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
 
 	__skb_unlink(skb, list);
 	__kfree_skb(skb);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
 
 	return next;
 }
@@ -4863,7 +4863,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
 	bool res = false;
 
 	if (!skb_queue_empty(&tp->out_of_order_queue)) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
 		__skb_queue_purge(&tp->out_of_order_queue);
 
 		/* Reset SACK state. A conforming SACK implementation will
@@ -4892,7 +4892,7 @@ static int tcp_prune_queue(struct sock *sk)
 
 	SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
 
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED);
 
 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		tcp_clamp_window(sk);
@@ -4922,7 +4922,7 @@ static int tcp_prune_queue(struct sock *sk)
 	 * drop receive data on the floor. It will get retransmitted
 	 * and hopefully then we'll have sufficient space.
 	 */
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_RCVPRUNED);
 
 	/* Massive buffer overcommit. */
 	tp->pred_flags = 0;
@@ -5181,7 +5181,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
 	if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
 	    tcp_paws_discard(sk, skb)) {
 		if (!th->rst) {
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
+			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
 			if (!tcp_oow_rate_limited(sock_net(sk), skb,
 						  LINUX_MIB_TCPACKSKIPPEDPAWS,
 						  &tp->last_oow_ack_time))
@@ -5234,7 +5234,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
 syn_challenge:
 		if (syn_inerr)
 			__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
 		tcp_send_challenge_ack(sk, skb);
 		goto discard;
 	}
@@ -5377,7 +5377,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
 				__skb_pull(skb, tcp_header_len);
 				tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
-				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER);
+				__NET_INC_STATS(sock_net(sk),
+						LINUX_MIB_TCPHPHITSTOUSER);
 				eaten = 1;
 			}
 		}
@@ -5399,7 +5400,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 
 			tcp_rcv_rtt_measure_ts(sk, skb);
 
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
+			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
 			/* Bulk data transfer: receiver */
 			eaten = tcp_queue_rcv(sk, skb, tcp_header_len,
@@ -5549,12 +5550,14 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
 			break;
 		}
 		tcp_rearm_rto(sk);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+		__NET_INC_STATS(sock_net(sk),
+				LINUX_MIB_TCPFASTOPENACTIVEFAIL);
 		return true;
 	}
 	tp->syn_data_acked = tp->syn_data;
 	if (tp->syn_data_acked)
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
+		__NET_INC_STATS(sock_net(sk),
+				LINUX_MIB_TCPFASTOPENACTIVE);
 
 	tcp_fastopen_add_skb(sk, synack);
 
@@ -5589,7 +5592,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 		if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
 		    !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
 			     tcp_time_stamp)) {
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED);
+			__NET_INC_STATS(sock_net(sk),
+					LINUX_MIB_PAWSACTIVEREJECTED);
 			goto reset_and_undo;
 		}
 
@@ -5958,7 +5962,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 		    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 		     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
 			tcp_done(sk);
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
+			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 			return 1;
 		}
 
@@ -6015,7 +6019,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 		if (sk->sk_shutdown & RCV_SHUTDOWN) {
 			if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 			    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
-				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
+				__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 				tcp_reset(sk);
 				return 1;
 			}
@@ -6153,10 +6157,10 @@ static bool tcp_syn_flood_action(const struct sock *sk,
 	if (net->ipv4.sysctl_tcp_syncookies) {
 		msg = "Sending cookies";
 		want_cookie = true;
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
 	} else
 #endif
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
 
 	if (!queue->synflood_warned &&
 	    net->ipv4.sysctl_tcp_syncookies != 2 &&
@@ -6217,7 +6221,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 	 * timeout.
 	 */
 	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 		goto drop;
 	}
 
@@ -6264,7 +6268,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 		if (dst && strict &&
 		    !tcp_peer_is_proven(req, dst, true,
 					tmp_opt.saw_tstamp)) {
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
+			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
 			goto drop_and_release;
 		}
 	}