author     Eric Dumazet <edumazet@google.com>     2016-12-03 14:14:53 -0500
committer  David S. Miller <davem@davemloft.net>  2016-12-05 13:32:23 -0500
commit     a9b204d1564702b704ad6fe74f10a102c7b87ba3 (patch)
tree       44251778743174bb4e68c70225b19397f6c37c48 /net/ipv4/tcp_output.c
parent     b223feb9de2a65c533ff95c08e834fa732906ea5 (diff)
tcp: tsq: avoid one atomic in tcp_wfree()
Under high load, tcp_wfree() has an atomic operation trying to
schedule a tasklet over and over.

We can schedule it only if our per cpu list was empty.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
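The idea behind the patch is a general producer/consumer pattern: when several producers feed one per-CPU list that a softirq later drains, only the producer that turns the list from empty to non-empty needs to schedule the drain; every later producer just links its node in. Below is a minimal userspace sketch of that pattern, not kernel code: work_queue, queue_work() and schedule_worker() are made-up names standing in for tsq_tasklet, tcp_wfree() and tasklet_schedule().

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct work_item {
	struct work_item *next;
};

struct work_queue {
	struct work_item *head;	/* stands in for tsq->head */
	int wakeups;		/* counts stand-in "tasklet_schedule()" calls */
};

/* stand-in for tasklet_schedule(); in the kernel this is itself an atomic op */
static void schedule_worker(struct work_queue *q)
{
	q->wakeups++;
}

/* stand-in for the queueing path in tcp_wfree() */
static void queue_work(struct work_queue *q, struct work_item *w)
{
	bool empty = (q->head == NULL);	/* same test as list_empty(&tsq->head) */

	w->next = q->head;
	q->head = w;
	if (empty)	/* only the producer that found the list empty wakes the consumer */
		schedule_worker(q);
}

int main(void)
{
	struct work_queue q = { NULL, 0 };
	struct work_item items[3];

	for (int i = 0; i < 3; i++)
		queue_work(&q, &items[i]);

	/* prints "wakeups = 1": one schedule for three queued items instead of three */
	printf("wakeups = %d\n", q.wakeups);
	return 0;
}

With the same check in the kernel path, a burst of frees on one CPU pays for tasklet_schedule() (and its atomic test-and-set) only once, which is the saving the commit message describes.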
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--  net/ipv4/tcp_output.c  5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index fa23b688a6f3..0db63efe5b8b 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -880,6 +880,7 @@ void tcp_wfree(struct sk_buff *skb)
 
 	for (oval = READ_ONCE(tp->tsq_flags);; oval = nval) {
 		struct tsq_tasklet *tsq;
+		bool empty;
 
 		if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
 			goto out;
@@ -892,8 +893,10 @@ void tcp_wfree(struct sk_buff *skb)
 		/* queue this socket to tasklet queue */
 		local_irq_save(flags);
 		tsq = this_cpu_ptr(&tsq_tasklet);
+		empty = list_empty(&tsq->head);
 		list_add(&tp->tsq_node, &tsq->head);
-		tasklet_schedule(&tsq->tasklet);
+		if (empty)
+			tasklet_schedule(&tsq->tasklet);
 		local_irq_restore(flags);
 		return;
 	}