author	Eric Dumazet <edumazet@google.com>	2016-04-27 19:44:27 -0400
committer	David S. Miller <davem@davemloft.net>	2016-04-27 22:48:22 -0400
commit	6aef70a851ac77967992340faaff33f44598f60a (patch)
tree	fe2e05554f7901d9ff1349b403e11d1e95874fbc /net/ipv4/tcp.c
parent	2995aea5b68b6850e76aadd95be777cb36949e62 (diff)
net: snmp: kill various STATS_USER() helpers
In the old days (before linux-3.0), SNMP counters were duplicated: one for user context, and one for BH context.

After commit 8f0ea0fe3a03 ("snmp: reduce percpu needs by 50%") we have a single copy, and what really matters is whether preemption is enabled or disabled, since we use this_cpu_inc() or __this_cpu_inc() respectively.

We therefore kill SNMP_INC_STATS_USER(), SNMP_ADD_STATS_USER(), NET_INC_STATS_USER(), NET_ADD_STATS_USER(), SCTP_INC_STATS_USER(), SNMP_INC_STATS64_USER(), SNMP_ADD_STATS64_USER(), TCP_ADD_STATS_USER(), UDP_INC_STATS_USER(), UDP6_INC_STATS_USER(), and XFRM_INC_STATS_USER().

Following patches will rename the __BH helpers to make clear that their use is not tied to BH being disabled.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
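The distinction the commit message draws between this_cpu_inc() and __this_cpu_inc() is what makes the _USER helpers redundant once only one per-cpu copy exists. As a rough illustration, the userspace sketch below mimics only the calling convention; the macro bodies are invented stand-ins for this example, not the real <linux/percpu.h> definitions, and NR_CPUS/fake_cpu are likewise made up:

#include <stdio.h>

#define NR_CPUS 4

/* A single per-cpu copy per counter, as after commit 8f0ea0fe3a03. */
static unsigned long tcp_mib[NR_CPUS];

/* Stand-in for the current CPU id; the kernel derives this itself. */
static int fake_cpu;

/*
 * Illustrative stand-ins only. The real this_cpu_inc() performs the
 * read-modify-write as one preemption-safe operation (a single
 * "incq %gs:off" on x86), so it is safe in preemptible context.
 * __this_cpu_inc() skips that guarantee: the caller must already have
 * preemption (or BH) disabled.
 */
#define this_cpu_inc(var)	((var)[fake_cpu]++)
#define __this_cpu_inc(var)	((var)[fake_cpu]++)

/* The surviving helper pair after this patch: no _USER variant. */
#define NET_INC_STATS(mib)	this_cpu_inc(mib)
#define NET_INC_STATS_BH(mib)	__this_cpu_inc(mib)

int main(void)
{
	/* tcp_close()/tcp_recvmsg() run in process context: plain helper. */
	NET_INC_STATS(tcp_mib);

	/* The softirq receive path would use the _BH flavor instead. */
	NET_INC_STATS_BH(tcp_mib);

	/* Fold the per-cpu slots, as snmp_fold_field() does for /proc. */
	unsigned long sum = 0;
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sum += tcp_mib[cpu];
	printf("counter = %lu\n", sum);
	return 0;
}

With a single copy, NET_INC_STATS_USER() had nothing left to select but the preemption-safe increment, which is why every call site in the diff below is replaced one-for-one with NET_INC_STATS().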
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--	net/ipv4/tcp.c	12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 4d73858991af..55ef55ac9e38 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1443,7 +1443,7 @@ static void tcp_prequeue_process(struct sock *sk)
 	struct sk_buff *skb;
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
 
 	/* RX process wants to run with disabled BHs, though it is not
 	 * necessary */
@@ -1777,7 +1777,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 
 			chunk = len - tp->ucopy.len;
 			if (chunk != 0) {
-				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
+				NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
 				len -= chunk;
 				copied += chunk;
 			}
@@ -1789,7 +1789,7 @@ do_prequeue:
 
 				chunk = len - tp->ucopy.len;
 				if (chunk != 0) {
-					NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+					NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
 					len -= chunk;
 					copied += chunk;
 				}
@@ -1875,7 +1875,7 @@ skip_copy:
 			tcp_prequeue_process(sk);
 
 			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
-				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+				NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
 				len -= chunk;
 				copied += chunk;
 			}
@@ -2065,13 +2065,13 @@ void tcp_close(struct sock *sk, long timeout)
 		sk->sk_prot->disconnect(sk, 0);
 	} else if (data_was_unread) {
 		/* Unread data was tossed, zap the connection. */
-		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
 		tcp_set_state(sk, TCP_CLOSE);
 		tcp_send_active_reset(sk, sk->sk_allocation);
 	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
 		/* Check zero linger _after_ checking for unread data. */
 		sk->sk_prot->disconnect(sk, 0);
-		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 	} else if (tcp_close_state(sk)) {
 		/* We FIN if the application ate all the data before
 		 * zapping the connection.