author    Eric Dumazet <edumazet@google.com>    2016-04-29 17:16:47 -0400
committer David S. Miller <davem@davemloft.net>    2016-05-02 17:02:25 -0400
commit    c10d9310edf5aa4a676991139d1a43ec7d87e56b (patch)
tree      8ba2a9ec1a83011976369f96481220cd7945eab6 /net/ipv4/tcp_output.c
parent    5e59c83f2335d634946e1a4fe7bc1e921930f132 (diff)
tcp: do not assume TCP code is non preemptible
We want to make the TCP stack preemptible, as draining the prequeue and backlog queues can take a lot of time.

Many SNMP updates were assuming that BH (and hence preemption) was disabled, so we need to convert some __NET_INC_STATS() calls to NET_INC_STATS(), and some __TCP_INC_STATS() calls to TCP_INC_STATS().

Before using this_cpu_ptr(net->ipv4.tcp_sk) in tcp_v4_send_reset() and tcp_v4_send_ack(), we add an explicit preempt-disabled section.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
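For context, the practical difference between the two macro families is how the per-cpu SNMP counter is incremented. The definitions below are paraphrased from include/net/ip.h and include/net/snmp.h of roughly this era; treat them as a sketch rather than the verbatim source:

/* Paraphrased sketch, not the exact tree contents. */
#define NET_INC_STATS(net, field)	\
	SNMP_INC_STATS((net)->mib.net_statistics, field)
#define __NET_INC_STATS(net, field)	\
	__SNMP_INC_STATS((net)->mib.net_statistics, field)

/* this_cpu_inc() is preempt-safe: it is either a single arch-level
 * per-cpu increment or a read-modify-write bracketed by
 * preempt_disable()/preempt_enable().  __this_cpu_inc() skips that
 * protection and is only valid when preemption (e.g. BH) is already
 * disabled -- exactly the assumption this patch removes.
 */
#define SNMP_INC_STATS(mib, field)	this_cpu_inc(mib->mibs[field])
#define __SNMP_INC_STATS(mib, field)	__this_cpu_inc(mib->mibs[field])

In other words, converting __NET_INC_STATS() to NET_INC_STATS() replaces a now-invalid "preemption is already off" assumption with a preemption-safe increment; on x86 both variants typically compile to the same single instruction, while other architectures pay a preempt_disable()/preempt_enable() pair for the safe form.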
Diffstat (limited to 'net/ipv4/tcp_output.c')
 net/ipv4/tcp_output.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 1a487ff95d4c..25d527922b18 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2221,14 +2221,13 @@ bool tcp_schedule_loss_probe(struct sock *sk)
 /* Thanks to skb fast clones, we can detect if a prior transmit of
  * a packet is still in a qdisc or driver queue.
  * In this case, there is very little point doing a retransmit !
- * Note: This is called from BH context only.
  */
 static bool skb_still_in_host_queue(const struct sock *sk,
 				    const struct sk_buff *skb)
 {
 	if (unlikely(skb_fclone_busy(sk, skb))) {
-		__NET_INC_STATS(sock_net(sk),
-				LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
+		NET_INC_STATS(sock_net(sk),
+			      LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
 		return true;
 	}
 	return false;
@@ -2290,7 +2289,7 @@ void tcp_send_loss_probe(struct sock *sk)
 	tp->tlp_high_seq = tp->snd_nxt;
 
 probe_sent:
-	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
 	/* Reset s.t. tcp_rearm_rto will restart timer from now */
 	inet_csk(sk)->icsk_pending = 0;
 rearm_timer:
@@ -2699,7 +2698,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
 		tp->retrans_stamp = tcp_skb_timestamp(skb);
 
 	} else if (err != -EBUSY) {
-		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
 	}
 
 	if (tp->undo_retrans < 0)
@@ -2823,7 +2822,7 @@ begin_fwd:
 		if (tcp_retransmit_skb(sk, skb, segs))
 			return;
 
-		__NET_INC_STATS(sock_net(sk), mib_idx);
+		NET_INC_STATS(sock_net(sk), mib_idx);
 
 		if (tcp_in_cwnd_reduction(sk))
 			tp->prr_out += tcp_skb_pcount(skb);
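The explicit preempt-disabled section mentioned in the commit message lives in net/ipv4/tcp_ipv4.c rather than in this file, so it does not appear in the diff above. The shape of that change, as described, is roughly the following sketch (illustrative only; argument lists elided, not the verbatim hunk):

/* Sketch of the pattern described for tcp_v4_send_reset() and
 * tcp_v4_send_ack().  this_cpu_ptr() only yields a stable pointer while
 * the task cannot migrate, so once TCP may run preemptibly the access
 * must be bracketed explicitly.  The __TCP_INC_STATS() calls remain
 * legal here precisely because preemption is off inside the bracket.
 */
	preempt_disable();
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, /* ... reply parameters elided ... */);
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
	preempt_enable();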