author		Eric Dumazet <edumazet@google.com>	2016-08-31 13:42:29 -0400
committer	Ingo Molnar <mingo@kernel.org>		2016-09-30 04:43:36 -0400
commit		4cd13c21b207e80ddb1144c576500098f2d5f882 (patch)
tree		53ea57c4d22567ed1c0779b82454ef8426abbf94 /kernel/softirq.c
parent		b8129a1f6aaaca02d92186acf19ceb545b4b489a (diff)
softirq: Let ksoftirqd do its job
A while back, Paolo and Hannes sent an RFC patch adding support for a threadable napi poll loop: (https://patchwork.ozlabs.org/patch/620657/)

The problem seems to be that softirqs are very aggressive and are often handled by the current process, even when we are under stress and ksoftirqd has already been scheduled so that innocent threads would have a chance to make progress.

This patch makes sure that if ksoftirqd is running, we let it perform the softirq work.

Jonathan Corbet summarized the issue in https://lwn.net/Articles/687617/

Tested:
 - NIC receiving traffic handled by CPU 0
 - UDP receiver running on CPU 0, using a single UDP socket.
 - Incoming flood of UDP packets targeting the UDP socket.
(A minimal sketch of such a receiver appears after the sign-off trailers below.)

Before the patch, the UDP receiver could almost never get CPU cycles and could only receive ~2,000 packets per second.

After the patch, CPU cycles are split 50/50 between the user application and ksoftirqd/0, and we can effectively read ~900,000 packets per second, a huge improvement in this DoS situation. (Note that more packets are now dropped by the NIC itself, since the BH handlers get fewer CPU cycles to drain the RX ring buffer.)

Since the load now runs in well-identified thread context, an admin can more easily tune process scheduling parameters if needed.

Reported-by: Paolo Abeni <pabeni@redhat.com>
Reported-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: David Miller <davem@davemloft.net>
Cc: Hannes Frederic Sowa <hannes@redhat.com>
Cc: Jesper Dangaard Brouer <jbrouer@redhat.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1472665349.14381.356.camel@edumazet-glaptop3.roam.corp.google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
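For context, the "Tested:" scenario above relies on a single-socket UDP receiver pinned to the same CPU that handles the NIC's receive softirqs, counting packets per second. The commit does not include that test program; what follows is only a minimal sketch of that kind of receiver (the port number, buffer size and once-per-second reporting are illustrative assumptions, not taken from the original test):

/* Minimal single-socket UDP sink pinned to CPU 0, printing packets/sec.
 * Illustrative sketch only -- not the program behind the numbers above. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <arpa/inet.h>
#include <sys/socket.h>

int main(void)
{
	cpu_set_t set;
	CPU_ZERO(&set);
	CPU_SET(0, &set);			/* same CPU as the NIC RX softirq */
	if (sched_setaffinity(0, sizeof(set), &set))
		perror("sched_setaffinity");

	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	struct sockaddr_in addr;
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(9999);		/* assumed port, pick any */
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr))) {
		perror("bind");
		return 1;
	}

	char buf[2048];
	unsigned long count = 0;
	time_t last = time(NULL);

	for (;;) {
		if (recv(fd, buf, sizeof(buf), 0) < 0)
			continue;
		count++;
		time_t now = time(NULL);
		if (now != last) {		/* report once per second */
			printf("%lu pkts/sec\n", count);
			count = 0;
			last = now;
		}
	}
}

Before the patch, a receiver like this starves because the incoming-packet softirqs keep being processed on its CPU; after it, ksoftirqd/0 competes with the receiver under the normal scheduler, which is where the roughly 50/50 CPU split described above comes from.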
Diffstat (limited to 'kernel/softirq.c')
-rw-r--r--	kernel/softirq.c	16
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 17caf4b63342..8ed90e3a88d6 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -78,6 +78,17 @@ static void wakeup_softirqd(void)
 }
 
 /*
+ * If ksoftirqd is scheduled, we do not want to process pending softirqs
+ * right now. Let ksoftirqd handle this at its own rate, to get fairness.
+ */
+static bool ksoftirqd_running(void)
+{
+	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
+
+	return tsk && (tsk->state == TASK_RUNNING);
+}
+
+/*
  * preempt_count and SOFTIRQ_OFFSET usage:
  * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
  *   softirq processing.
@@ -313,7 +324,7 @@ asmlinkage __visible void do_softirq(void)
 
 	pending = local_softirq_pending();
 
-	if (pending)
+	if (pending && !ksoftirqd_running())
 		do_softirq_own_stack();
 
 	local_irq_restore(flags);
@@ -340,6 +351,9 @@ void irq_enter(void)
 
 static inline void invoke_softirq(void)
 {
+	if (ksoftirqd_running())
+		return;
+
 	if (!force_irqthreads) {
 #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
 		/*
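As the commit message notes, once the softirq load runs in the per-CPU ksoftirqd threads, an admin can tune their scheduling parameters like any other task's. A minimal sketch of doing that from C, assuming the PID of ksoftirqd/0 has already been looked up (e.g. from /proc) and using an arbitrary nice value of 5:

/* Illustrative sketch only: renice an already-known ksoftirqd PID.
 * The PID is passed on the command line; obtain the real one from /proc. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/resource.h>
#include <sys/types.h>

int main(int argc, char **argv)
{
	pid_t pid = (argc > 1) ? (pid_t)atoi(argv[1]) : 0;

	if (pid <= 0) {
		fprintf(stderr, "usage: %s <ksoftirqd-pid>\n", argv[0]);
		return 1;
	}

	/* Give ksoftirqd a lower (nicer) priority so user threads win more often. */
	if (setpriority(PRIO_PROCESS, pid, 5)) {
		perror("setpriority");
		return 1;
	}
	return 0;
}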