aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv6/tcp_ipv6.c
diff options
context:
space:
mode:
authorEric Dumazet <edumazet@google.com>2016-04-29 17:16:47 -0400
committerDavid S. Miller <davem@davemloft.net>2016-05-02 17:02:25 -0400
commitc10d9310edf5aa4a676991139d1a43ec7d87e56b (patch)
tree8ba2a9ec1a83011976369f96481220cd7945eab6 /net/ipv6/tcp_ipv6.c
parent5e59c83f2335d634946e1a4fe7bc1e921930f132 (diff)
tcp: do not assume TCP code is non preemptible
We want to make the TCP stack preemptible, as draining prequeue and backlog queues can take a lot of time. Many SNMP updates were assuming that BH (and preemption) was disabled. Need to convert some __NET_INC_STATS() calls to NET_INC_STATS() and some __TCP_INC_STATS() to TCP_INC_STATS() Before using this_cpu_ptr(net->ipv4.tcp_sk) in tcp_v4_send_reset() and tcp_v4_send_ack(), we add an explicit preempt disabled section. Signed-off-by: Eric Dumazet <edumazet@google.com> Acked-by: Soheil Hassas Yeganeh <soheil@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv6/tcp_ipv6.c')
-rw-r--r--net/ipv6/tcp_ipv6.c12
1 files changed, 6 insertions, 6 deletions
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 52914714b923..7bdc9c9c231b 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -649,12 +649,12 @@ static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
649 return false; 649 return false;
650 650
651 if (hash_expected && !hash_location) { 651 if (hash_expected && !hash_location) {
652 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); 652 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
653 return true; 653 return true;
654 } 654 }
655 655
656 if (!hash_expected && hash_location) { 656 if (!hash_expected && hash_location) {
657 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); 657 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
658 return true; 658 return true;
659 } 659 }
660 660
@@ -825,9 +825,9 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
825 if (!IS_ERR(dst)) { 825 if (!IS_ERR(dst)) {
826 skb_dst_set(buff, dst); 826 skb_dst_set(buff, dst);
827 ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass); 827 ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
828 __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); 828 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
829 if (rst) 829 if (rst)
830 __TCP_INC_STATS(net, TCP_MIB_OUTRSTS); 830 TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
831 return; 831 return;
832 } 832 }
833 833
@@ -1276,8 +1276,8 @@ discard:
1276 kfree_skb(skb); 1276 kfree_skb(skb);
1277 return 0; 1277 return 0;
1278csum_err: 1278csum_err:
1279 __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); 1279 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1280 __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); 1280 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1281 goto discard; 1281 goto discard;
1282 1282
1283 1283