author		Eric Dumazet <edumazet@google.com>	2016-04-27 19:44:39 -0400
committer	David S. Miller <davem@davemloft.net>	2016-04-27 22:48:24 -0400
commit		02a1d6e7a6bb025a77da77012190e1efc1970f1c (patch)
tree		79fdbbaa1812a45cff7148cdaca96685e2c1a287 /net/ipv4/tcp_ipv4.c
parent		b15084ec7d4c89000242d69b5f57b4d138bad1b9 (diff)
net: rename NET_{ADD|INC}_STATS_BH()
Rename NET_INC_STATS_BH() to __NET_INC_STATS()
and NET_ADD_STATS_BH() to __NET_ADD_STATS().

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
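For readers tracking the rename: the two spellings differ in the per-CPU primitive they reach, not in what they count. The plain macro is safe from any context, while the double-underscore form maps to __this_cpu_inc() and therefore assumes the caller already runs with preemption or BHs disabled, which holds on the softirq paths touched in this file. A minimal sketch of the pair after this series, modeled on include/net/ip.h and include/net/snmp.h; treat it as illustrative rather than a verbatim copy of the headers:

/* Illustrative sketch, not verbatim kernel source.
 * The plain variant may be used from any context; the __ variant
 * assumes preemption/BHs are already disabled by the caller.
 */
#define NET_INC_STATS(net, field)	\
	SNMP_INC_STATS((net)->mib.net_statistics, field)
#define __NET_INC_STATS(net, field)	\
	__SNMP_INC_STATS((net)->mib.net_statistics, field)

#define SNMP_INC_STATS(mib, field)	this_cpu_inc(mib->mibs[field])
#define __SNMP_INC_STATS(mib, field)	__this_cpu_inc(mib->mibs[field])

Since TCP's IPv4 receive and error paths run in softirq context, the one-for-one substitution below keeps the cheaper non-atomic increment these sites were already getting from the _BH variants.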
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--	net/ipv4/tcp_ipv4.c	22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 378e92d41c6c..510f7a3c758b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -320,7 +320,7 @@ void tcp_req_err(struct sock *sk, u32 seq, bool abort)
 	 * an established socket here.
 	 */
 	if (seq != tcp_rsk(req)->snt_isn) {
-		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 	} else if (abort) {
 		/*
 		 * Still in SYN_RECV, just remove it silently.
@@ -396,13 +396,13 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 	 */
 	if (sock_owned_by_user(sk)) {
 		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
-			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 	}
 	if (sk->sk_state == TCP_CLOSE)
 		goto out;

 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
-		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
 		goto out;
 	}

@@ -413,7 +413,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
 	if (sk->sk_state != TCP_LISTEN &&
 	    !between(seq, snd_una, tp->snd_nxt)) {
-		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 		goto out;
 	}

@@ -1151,12 +1151,12 @@ static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
 		return false;

 	if (hash_expected && !hash_location) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 		return true;
 	}

 	if (!hash_expected && hash_location) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 		return true;
 	}

@@ -1342,7 +1342,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 	return newsk;

 exit_overflow:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 exit_nonewsk:
 	dst_release(dst);
 exit:
@@ -1513,8 +1513,8 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)

 		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
 			sk_backlog_rcv(sk, skb1);
-			NET_INC_STATS_BH(sock_net(sk),
-					 LINUX_MIB_TCPPREQUEUEDROPPED);
+			__NET_INC_STATS(sock_net(sk),
+					LINUX_MIB_TCPPREQUEUEDROPPED);
 		}

 		tp->ucopy.memory = 0;
@@ -1629,7 +1629,7 @@ process:
 		}
 	}
 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
-		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
 		goto discard_and_relse;
 	}

@@ -1662,7 +1662,7 @@ process:
 	} else if (unlikely(sk_add_backlog(sk, skb,
 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
 		bh_unlock_sock(sk);
-		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
+		__NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
 		goto discard_and_relse;
 	}
 	bh_unlock_sock(sk);