diff options
 kernel/softirq.c | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 17caf4b63342..8ed90e3a88d6 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -78,6 +78,17 @@ static void wakeup_softirqd(void)
 }
 
 /*
+ * If ksoftirqd is scheduled, we do not want to process pending softirqs
+ * right now. Let ksoftirqd handle this at its own rate, to get fairness.
+ */
+static bool ksoftirqd_running(void)
+{
+	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
+
+	return tsk && (tsk->state == TASK_RUNNING);
+}
+
+/*
  * preempt_count and SOFTIRQ_OFFSET usage:
  * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
  *   softirq processing.
@@ -313,7 +324,7 @@ asmlinkage __visible void do_softirq(void)
 
 	pending = local_softirq_pending();
 
-	if (pending)
+	if (pending && !ksoftirqd_running())
 		do_softirq_own_stack();
 
 	local_irq_restore(flags);
@@ -340,6 +351,9 @@ void irq_enter(void)
 
 static inline void invoke_softirq(void)
 {
+	if (ksoftirqd_running())
+		return;
+
 	if (!force_irqthreads) {
 #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
 	/*