author     Eric Dumazet <edumazet@google.com>       2016-04-29 17:16:47 -0400
committer  David S. Miller <davem@davemloft.net>    2016-05-02 17:02:25 -0400
commit     c10d9310edf5aa4a676991139d1a43ec7d87e56b (patch)
tree       8ba2a9ec1a83011976369f96481220cd7945eab6 /net/ipv4/tcp_ipv4.c
parent     5e59c83f2335d634946e1a4fe7bc1e921930f132 (diff)
tcp: do not assume TCP code is non preemptible
We want to make the TCP stack preemptible, as draining the prequeue and backlog queues can take a lot of time.

Many SNMP updates were assuming that BH (and hence preemption) was disabled, so we need to convert some __NET_INC_STATS() calls to NET_INC_STATS(), and some __TCP_INC_STATS() calls to TCP_INC_STATS().

Before using this_cpu_ptr(net->ipv4.tcp_sk) in tcp_v4_send_reset() and tcp_v4_send_ack(), we add an explicit preempt-disabled section.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
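For readers unfamiliar with the two macro flavours being swapped here: roughly, and sketching from the include/net/snmp.h definitions of this era rather than quoting the tree verbatim, the double-underscore variants use the non-preempt-safe per-CPU increment, while the plain variants are safe from preemptible context:

	/* Sketch (cf. include/net/snmp.h); not a verbatim copy of the tree. */
	#define __SNMP_INC_STATS(mib, field)	__this_cpu_inc(mib->mibs[field])  /* caller must prevent preemption */
	#define SNMP_INC_STATS(mib, field)	this_cpu_inc(mib->mibs[field])    /* preempt-safe on its own */

this_cpu_inc() handles preemption itself (on x86 it compiles to a single instruction), so the plain variants remain correct once TCP code can run preemptibly.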
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--  net/ipv4/tcp_ipv4.c | 14
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 87b173b563b0..761bc492c5e3 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -692,6 +692,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
 		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));
 
 	arg.tos = ip_hdr(skb)->tos;
+	preempt_disable();
 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
@@ -699,6 +700,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
 
 	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
 	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
+	preempt_enable();
 
 #ifdef CONFIG_TCP_MD5SIG
 out:
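The idiom added above generalizes beyond this file: a pointer obtained via this_cpu_ptr() is only stable while the task cannot migrate, so any multi-statement use of it must sit inside a preempt-disabled section. A minimal, self-contained sketch of the pattern (demo_sk and demo_use_percpu_sock are hypothetical names for illustration, not part of this patch):

	#include <linux/percpu.h>
	#include <net/sock.h>

	/* Hypothetical per-CPU socket, mirroring the shape of net->ipv4.tcp_sk. */
	static DEFINE_PER_CPU(struct sock *, demo_sk);

	static void demo_use_percpu_sock(void)
	{
		struct sock *sk;

		preempt_disable();		/* pin the task to this CPU */
		sk = *this_cpu_ptr(&demo_sk);	/* pointer valid only while pinned */
		/* ... transmit via sk; no other task can use this CPU's slot ... */
		preempt_enable();
	}

Without the preempt_disable()/preempt_enable() pair, a preemptible caller could be migrated mid-sequence and two tasks could end up sharing one CPU's socket concurrently.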
@@ -774,12 +776,14 @@ static void tcp_v4_send_ack(struct net *net,
 	if (oif)
 		arg.bound_dev_if = oif;
 	arg.tos = tos;
+	preempt_disable();
 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
 			      &arg, arg.iov[0].iov_len);
 
 	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+	preempt_enable();
 }
 
 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
@@ -1151,12 +1155,12 @@ static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
 		return false;
 
 	if (hash_expected && !hash_location) {
-		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 		return true;
 	}
 
 	if (!hash_expected && hash_location) {
-		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 		return true;
 	}
 
@@ -1342,7 +1346,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 	return newsk;
 
 exit_overflow:
-	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 exit_nonewsk:
 	dst_release(dst);
 exit:
@@ -1432,8 +1436,8 @@ discard:
 	return 0;
 
 csum_err:
-	__TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
-	__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
+	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
+	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
 	goto discard;
 }
 EXPORT_SYMBOL(tcp_v4_do_rcv);