author    Eric Dumazet <edumazet@google.com>       2016-04-27 19:44:27 -0400
committer David S. Miller <davem@davemloft.net>    2016-04-27 22:48:22 -0400
commit    6aef70a851ac77967992340faaff33f44598f60a (patch)
tree      fe2e05554f7901d9ff1349b403e11d1e95874fbc
parent    2995aea5b68b6850e76aadd95be777cb36949e62 (diff)
net: snmp: kill various STATS_USER() helpers
In the old days (before linux-3.0), SNMP counters were duplicated, one for
user context, and one for BH context.

After commit 8f0ea0fe3a03 ("snmp: reduce percpu needs by 50%") we have a
single copy, and what really matters is preemption being enabled or
disabled, since we use this_cpu_inc() or __this_cpu_inc() respectively.

We therefore kill SNMP_INC_STATS_USER(), SNMP_ADD_STATS_USER(),
NET_INC_STATS_USER(), NET_ADD_STATS_USER(), SCTP_INC_STATS_USER(),
SNMP_INC_STATS64_USER(), SNMP_ADD_STATS64_USER(), TCP_ADD_STATS_USER(),
UDP_INC_STATS_USER(), UDP6_INC_STATS_USER(), and XFRM_INC_STATS_USER().

Following patches will rename __BH helpers to make clear their usage is
not tied to BH being disabled.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
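For illustration only (not part of this patch): a minimal sketch of the rule the changelog describes, i.e. a caller now picks a helper based on whether preemption is already disabled, not on "user vs BH". The macro names NET_INC_STATS / NET_INC_STATS_BH and the LINUX_MIB_TCPPREQUEUED counter are the real ones touched by the hunks below; the wrapper function itself is hypothetical.

/*
 * Illustrative sketch only -- not from this patch. example_count_event()
 * is a hypothetical caller; the macros and the counter index are the
 * ones that appear in the diff below.
 */
#include <net/ip.h>		/* NET_INC_STATS(), NET_INC_STATS_BH() */
#include <linux/snmp.h>		/* LINUX_MIB_* counter indices */

static void example_count_event(struct net *net, bool in_softirq_ctx)
{
	if (in_softirq_ctx)
		/* BH/softirq context: preemption is already off, so the
		 * cheaper __this_cpu_inc()-based helper is safe.
		 */
		NET_INC_STATS_BH(net, LINUX_MIB_TCPPREQUEUED);
	else
		/* Process context with preemption enabled: use the
		 * this_cpu_inc()-based helper.
		 */
		NET_INC_STATS(net, LINUX_MIB_TCPPREQUEUED);
}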
-rw-r--r--  include/net/ip.h         |  2
-rw-r--r--  include/net/sctp/sctp.h  |  1
-rw-r--r--  include/net/snmp.h       | 22
-rw-r--r--  include/net/tcp.h        |  9
-rw-r--r--  include/net/udp.h        | 14
-rw-r--r--  include/net/xfrm.h       |  2
-rw-r--r--  net/ipv4/tcp.c           | 12
-rw-r--r--  net/ipv4/udp.c           | 24
-rw-r--r--  net/ipv6/udp.c           | 49
-rw-r--r--  net/sctp/chunk.c         |  2
10 files changed, 59 insertions, 78 deletions
diff --git a/include/net/ip.h b/include/net/ip.h
index 93725e546758..ae0e85d018e8 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -194,10 +194,8 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
 #define IP_UPD_PO_STATS_BH(net, field, val) SNMP_UPD_PO_STATS64_BH((net)->mib.ip_statistics, field, val)
 #define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
 #define NET_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.net_statistics, field)
-#define NET_INC_STATS_USER(net, field)	SNMP_INC_STATS_USER((net)->mib.net_statistics, field)
 #define NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
 #define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
-#define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
 
 u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
 unsigned long snmp_fold_field(void __percpu *mib, int offt);
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 3f1c0ff7d4b6..5a2c4c3307a7 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -207,7 +207,6 @@ extern int sysctl_sctp_wmem[3];
 /* SCTP SNMP MIB stats handlers */
 #define SCTP_INC_STATS(net, field)	SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
 #define SCTP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->sctp.sctp_statistics, field)
-#define SCTP_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->sctp.sctp_statistics, field)
 #define SCTP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->sctp.sctp_statistics, field)
 
 /* sctp mib definitions */
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 35512ac6dcfb..56239fc05c51 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -126,9 +126,6 @@ struct linux_xfrm_mib {
 #define SNMP_INC_STATS_BH(mib, field)	\
 			__this_cpu_inc(mib->mibs[field])
 
-#define SNMP_INC_STATS_USER(mib, field)	\
-			this_cpu_inc(mib->mibs[field])
-
 #define SNMP_INC_STATS_ATOMIC_LONG(mib, field)	\
 			atomic_long_inc(&mib->mibs[field])
 
@@ -141,9 +138,6 @@ struct linux_xfrm_mib {
 #define SNMP_ADD_STATS_BH(mib, field, addend)	\
 			__this_cpu_add(mib->mibs[field], addend)
 
-#define SNMP_ADD_STATS_USER(mib, field, addend)	\
-			this_cpu_add(mib->mibs[field], addend)
-
 #define SNMP_ADD_STATS(mib, field, addend)	\
 			this_cpu_add(mib->mibs[field], addend)
 #define SNMP_UPD_PO_STATS(mib, basefield, addend)	\
@@ -170,18 +164,14 @@ struct linux_xfrm_mib {
 		u64_stats_update_end(&ptr->syncp);		\
 	} while (0)
 
-#define SNMP_ADD_STATS64_USER(mib, field, addend) 		\
+#define SNMP_ADD_STATS64(mib, field, addend) 			\
 	do {							\
-		local_bh_disable();				\
+		preempt_disable();				\
 		SNMP_ADD_STATS64_BH(mib, field, addend);	\
-		local_bh_enable();				\
+		preempt_enable();				\
 	} while (0)
 
-#define SNMP_ADD_STATS64(mib, field, addend) \
-	SNMP_ADD_STATS64_USER(mib, field, addend)
-
 #define SNMP_INC_STATS64_BH(mib, field) SNMP_ADD_STATS64_BH(mib, field, 1)
-#define SNMP_INC_STATS64_USER(mib, field) SNMP_ADD_STATS64_USER(mib, field, 1)
 #define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
 #define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend)	\
 	do { \
@@ -194,17 +184,15 @@ struct linux_xfrm_mib {
 	} while (0)
 #define SNMP_UPD_PO_STATS64(mib, basefield, addend)		\
 	do { \
-		local_bh_disable();				\
+		preempt_disable();				\
 		SNMP_UPD_PO_STATS64_BH(mib, basefield, addend);	\
-		local_bh_enable();				\
+		preempt_enable();				\
 	} while (0)
 #else
 #define SNMP_INC_STATS64_BH(mib, field)		SNMP_INC_STATS_BH(mib, field)
-#define SNMP_INC_STATS64_USER(mib, field)	SNMP_INC_STATS_USER(mib, field)
 #define SNMP_INC_STATS64(mib, field)		SNMP_INC_STATS(mib, field)
 #define SNMP_DEC_STATS64(mib, field)		SNMP_DEC_STATS(mib, field)
 #define SNMP_ADD_STATS64_BH(mib, field, addend) SNMP_ADD_STATS_BH(mib, field, addend)
-#define SNMP_ADD_STATS64_USER(mib, field, addend) SNMP_ADD_STATS_USER(mib, field, addend)
 #define SNMP_ADD_STATS64(mib, field, addend) SNMP_ADD_STATS(mib, field, addend)
 #define SNMP_UPD_PO_STATS64(mib, basefield, addend) SNMP_UPD_PO_STATS(mib, basefield, addend)
 #define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) SNMP_UPD_PO_STATS_BH(mib, basefield, addend)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 7f2553da10d1..cfe15f712164 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -334,7 +334,6 @@ extern struct proto tcp_prot;
 #define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
 #define TCP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
 #define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
-#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
 #define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
 
 void tcp_tasklet_init(void);
@@ -1298,10 +1297,10 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
 static inline void tcp_mib_init(struct net *net)
 {
 	/* See RFC 2012 */
-	TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
-	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
-	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
-	TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
+	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
+	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
+	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
+	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
 }
 
 /* from STCP */
diff --git a/include/net/udp.h b/include/net/udp.h
index 3c5a65e0946d..2f37f689d85a 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -289,20 +289,20 @@ struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
 /*
  *	SNMP statistics for UDP and UDP-Lite
  */
-#define UDP_INC_STATS_USER(net, field, is_udplite)	      do { \
-	if (is_udplite) SNMP_INC_STATS_USER((net)->mib.udplite_statistics, field); \
-	else		SNMP_INC_STATS_USER((net)->mib.udp_statistics, field); } while(0)
+#define UDP_INC_STATS(net, field, is_udplite)		      do { \
+	if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
+	else		SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
 #define UDP_INC_STATS_BH(net, field, is_udplite)	      do { \
 	if (is_udplite) SNMP_INC_STATS_BH((net)->mib.udplite_statistics, field); \
 	else		SNMP_INC_STATS_BH((net)->mib.udp_statistics, field); } while(0)
 
 #define UDP6_INC_STATS_BH(net, field, is_udplite)	    do { \
 	if (is_udplite) SNMP_INC_STATS_BH((net)->mib.udplite_stats_in6, field);\
 	else		SNMP_INC_STATS_BH((net)->mib.udp_stats_in6, field);  \
 } while(0)
-#define UDP6_INC_STATS_USER(net, field, __lite)		    do { \
-	if (__lite) SNMP_INC_STATS_USER((net)->mib.udplite_stats_in6, field);  \
-	else	    SNMP_INC_STATS_USER((net)->mib.udp_stats_in6, field);      \
+#define UDP6_INC_STATS(net, field, __lite)		    do { \
+	if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);  \
+	else	    SNMP_INC_STATS((net)->mib.udp_stats_in6, field);      \
 } while(0)
 
 #if IS_ENABLED(CONFIG_IPV6)
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index d6f6e5006ee9..dab9e1b82963 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -46,11 +46,9 @@
 #ifdef CONFIG_XFRM_STATISTICS
 #define XFRM_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
 #define XFRM_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.xfrm_statistics, field)
-#define XFRM_INC_STATS_USER(net, field)	SNMP_INC_STATS_USER((net)-mib.xfrm_statistics, field)
 #else
 #define XFRM_INC_STATS(net, field)	((void)(net))
 #define XFRM_INC_STATS_BH(net, field)	((void)(net))
-#define XFRM_INC_STATS_USER(net, field)	((void)(net))
 #endif
 
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 4d73858991af..55ef55ac9e38 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1443,7 +1443,7 @@ static void tcp_prequeue_process(struct sock *sk)
 	struct sk_buff *skb;
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
 
 	/* RX process wants to run with disabled BHs, though it is not
 	 * necessary */
@@ -1777,7 +1777,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 
 			chunk = len - tp->ucopy.len;
 			if (chunk != 0) {
-				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
+				NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
 				len -= chunk;
 				copied += chunk;
 			}
@@ -1789,7 +1789,7 @@ do_prequeue:
 
 				chunk = len - tp->ucopy.len;
 				if (chunk != 0) {
-					NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+					NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
 					len -= chunk;
 					copied += chunk;
 				}
@@ -1875,7 +1875,7 @@ skip_copy:
 		tcp_prequeue_process(sk);
 
 		if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
-			NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+			NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
 			len -= chunk;
 			copied += chunk;
 		}
@@ -2065,13 +2065,13 @@ void tcp_close(struct sock *sk, long timeout)
 		sk->sk_prot->disconnect(sk, 0);
 	} else if (data_was_unread) {
 		/* Unread data was tossed, zap the connection. */
-		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
 		tcp_set_state(sk, TCP_CLOSE);
 		tcp_send_active_reset(sk, sk->sk_allocation);
 	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
 		/* Check zero linger _after_ checking for unread data. */
 		sk->sk_prot->disconnect(sk, 0);
-		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 	} else if (tcp_close_state(sk)) {
 		/* We FIN if the application ate all the data before
 		 * zapping the connection.
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 76ea0a8be090..00f5de9a155e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -882,13 +882,13 @@ send:
 	err = ip_send_skb(sock_net(sk), skb);
 	if (err) {
 		if (err == -ENOBUFS && !inet->recverr) {
-			UDP_INC_STATS_USER(sock_net(sk),
+			UDP_INC_STATS(sock_net(sk),
 				      UDP_MIB_SNDBUFERRORS, is_udplite);
 			err = 0;
 		}
 	} else
-		UDP_INC_STATS_USER(sock_net(sk),
+		UDP_INC_STATS(sock_net(sk),
 			      UDP_MIB_OUTDATAGRAMS, is_udplite);
 	return err;
 }
 
@@ -1157,8 +1157,8 @@ out:
 	 * seems like overkill.
 	 */
 	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
-		UDP_INC_STATS_USER(sock_net(sk),
+		UDP_INC_STATS(sock_net(sk),
 			      UDP_MIB_SNDBUFERRORS, is_udplite);
 	}
 	return err;
 
@@ -1352,16 +1352,16 @@ try_again:
 		trace_kfree_skb(skb, udp_recvmsg);
 		if (!peeked) {
 			atomic_inc(&sk->sk_drops);
-			UDP_INC_STATS_USER(sock_net(sk),
+			UDP_INC_STATS(sock_net(sk),
 				      UDP_MIB_INERRORS, is_udplite);
 		}
 		skb_free_datagram_locked(sk, skb);
 		return err;
 	}
 
 	if (!peeked)
-		UDP_INC_STATS_USER(sock_net(sk),
+		UDP_INC_STATS(sock_net(sk),
 			      UDP_MIB_INDATAGRAMS, is_udplite);
 
 	sock_recv_ts_and_drops(msg, sk, skb);
 
@@ -1386,8 +1386,8 @@ try_again:
 csum_copy_err:
 	slow = lock_sock_fast(sk);
 	if (!skb_kill_datagram(sk, skb, flags)) {
-		UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
-		UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
+		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 	}
 	unlock_sock_fast(sk, slow);
 
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 8d8b2cd8ec5b..baa56ca41a31 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -423,24 +423,22 @@ try_again:
 		if (!peeked) {
 			atomic_inc(&sk->sk_drops);
 			if (is_udp4)
-				UDP_INC_STATS_USER(sock_net(sk),
-						   UDP_MIB_INERRORS,
-						   is_udplite);
+				UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
+					      is_udplite);
 			else
-				UDP6_INC_STATS_USER(sock_net(sk),
-						    UDP_MIB_INERRORS,
-						    is_udplite);
+				UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
+					       is_udplite);
 		}
 		skb_free_datagram_locked(sk, skb);
 		return err;
 	}
 	if (!peeked) {
 		if (is_udp4)
-			UDP_INC_STATS_USER(sock_net(sk),
-					   UDP_MIB_INDATAGRAMS, is_udplite);
+			UDP_INC_STATS(sock_net(sk), UDP_MIB_INDATAGRAMS,
+				      is_udplite);
 		else
-			UDP6_INC_STATS_USER(sock_net(sk),
-					    UDP_MIB_INDATAGRAMS, is_udplite);
+			UDP6_INC_STATS(sock_net(sk), UDP_MIB_INDATAGRAMS,
+				       is_udplite);
 	}
 
 	sock_recv_ts_and_drops(msg, sk, skb);
@@ -487,15 +485,15 @@ csum_copy_err:
 	slow = lock_sock_fast(sk);
 	if (!skb_kill_datagram(sk, skb, flags)) {
 		if (is_udp4) {
-			UDP_INC_STATS_USER(sock_net(sk),
+			UDP_INC_STATS(sock_net(sk),
 					UDP_MIB_CSUMERRORS, is_udplite);
-			UDP_INC_STATS_USER(sock_net(sk),
+			UDP_INC_STATS(sock_net(sk),
 					UDP_MIB_INERRORS, is_udplite);
 		} else {
-			UDP6_INC_STATS_USER(sock_net(sk),
+			UDP6_INC_STATS(sock_net(sk),
 					UDP_MIB_CSUMERRORS, is_udplite);
-			UDP6_INC_STATS_USER(sock_net(sk),
+			UDP6_INC_STATS(sock_net(sk),
 					UDP_MIB_INERRORS, is_udplite);
 		}
 	}
 	unlock_sock_fast(sk, slow);
@@ -1015,13 +1013,14 @@ send:
 	err = ip6_send_skb(skb);
 	if (err) {
 		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
-			UDP6_INC_STATS_USER(sock_net(sk),
+			UDP6_INC_STATS(sock_net(sk),
 					UDP_MIB_SNDBUFERRORS, is_udplite);
 			err = 0;
 		}
-	} else
-		UDP6_INC_STATS_USER(sock_net(sk),
+	} else {
+		UDP6_INC_STATS(sock_net(sk),
 				UDP_MIB_OUTDATAGRAMS, is_udplite);
+	}
 	return err;
 }
 
@@ -1342,8 +1341,8 @@ out:
 	 * seems like overkill.
 	 */
 	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
-		UDP6_INC_STATS_USER(sock_net(sk),
+		UDP6_INC_STATS(sock_net(sk),
 				UDP_MIB_SNDBUFERRORS, is_udplite);
 	}
 	return err;
 
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 958ef5f33f4b..1eb94bf18ef4 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -239,7 +239,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
 	offset = 0;
 
 	if ((whole > 1) || (whole && over))
-		SCTP_INC_STATS_USER(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS);
+		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS);
 
 	/* Create chunks for all the full sized DATA chunks. */
 	for (i = 0, len = first_len; i < whole; i++) {