Diffstat (limited to 'kernel/softirq.c')
 kernel/softirq.c | 49 ++++++++++++++++++++++++-------------------------
 1 file changed, 24 insertions(+), 25 deletions(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 17caf4b63342..744fa611cae0 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -58,7 +58,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
 const char * const softirq_to_name[NR_SOFTIRQS] = {
-        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
+        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
         "TASKLET", "SCHED", "HRTIMER", "RCU"
 };
 
@@ -78,6 +78,17 @@ static void wakeup_softirqd(void)
 }
 
 /*
+ * If ksoftirqd is scheduled, we do not want to process pending softirqs
+ * right now. Let ksoftirqd handle this at its own rate, to get fairness.
+ */
+static bool ksoftirqd_running(void)
+{
+        struct task_struct *tsk = __this_cpu_read(ksoftirqd);
+
+        return tsk && (tsk->state == TASK_RUNNING);
+}
+
+/*
  * preempt_count and SOFTIRQ_OFFSET usage:
  * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
  *   softirq processing.
@@ -313,7 +324,7 @@ asmlinkage __visible void do_softirq(void)
 
         pending = local_softirq_pending();
 
-        if (pending)
+        if (pending && !ksoftirqd_running())
                 do_softirq_own_stack();
 
         local_irq_restore(flags);
@@ -340,6 +351,9 @@ void irq_enter(void)
 
 static inline void invoke_softirq(void)
 {
+        if (ksoftirqd_running())
+                return;
+
         if (!force_irqthreads) {
 #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
                 /*
@@ -482,7 +496,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 }
 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
 
-static void tasklet_action(struct softirq_action *a)
+static __latent_entropy void tasklet_action(struct softirq_action *a)
 {
         struct tasklet_struct *list;
 
@@ -518,7 +532,7 @@ static void tasklet_action(struct softirq_action *a)
         }
 }
 
-static void tasklet_hi_action(struct softirq_action *a)
+static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
 {
         struct tasklet_struct *list;
 
@@ -700,7 +714,7 @@ void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
         BUG();
 }
 
-static void takeover_tasklets(unsigned int cpu)
+static int takeover_tasklets(unsigned int cpu)
 {
         /* CPU is dead, so no lock needed. */
         local_irq_disable();
@@ -723,27 +737,12 @@ static void takeover_tasklets(unsigned int cpu)
         raise_softirq_irqoff(HI_SOFTIRQ);
 
         local_irq_enable();
+        return 0;
 }
+#else
+#define takeover_tasklets       NULL
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int cpu_callback(struct notifier_block *nfb, unsigned long action,
-                        void *hcpu)
-{
-        switch (action) {
-#ifdef CONFIG_HOTPLUG_CPU
-        case CPU_DEAD:
-        case CPU_DEAD_FROZEN:
-                takeover_tasklets((unsigned long)hcpu);
-                break;
-#endif /* CONFIG_HOTPLUG_CPU */
-        }
-        return NOTIFY_OK;
-}
-
-static struct notifier_block cpu_nfb = {
-        .notifier_call = cpu_callback
-};
-
 static struct smp_hotplug_thread softirq_threads = {
         .store                  = &ksoftirqd,
         .thread_should_run      = ksoftirqd_should_run,
@@ -753,8 +752,8 @@ static struct smp_hotplug_thread softirq_threads = {
 
 static __init int spawn_ksoftirqd(void)
 {
-        register_cpu_notifier(&cpu_nfb);
-
+        cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
+                                  takeover_tasklets);
         BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
 
         return 0;
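
Note (not part of the diff above): the last two hunks drop the old register_cpu_notifier()/notifier_block pattern in favour of a CPU hotplug state whose teardown callback runs once the CPU is dead. A minimal sketch of that registration pattern follows; the names example_dead/example_init are hypothetical, and it uses a dynamically allocated prepare-section state rather than the fixed CPUHP_SOFTIRQ_DEAD state reserved for softirq itself.

#include <linux/init.h>
#include <linux/cpuhotplug.h>

/* Hypothetical teardown: runs on a control CPU after @cpu has gone offline. */
static int example_dead(unsigned int cpu)
{
        /* Migrate or drop per-CPU work that @cpu can no longer handle. */
        return 0;
}

static __init int example_init(void)
{
        int ret;

        /*
         * Register only the teardown ("dead") callback; the startup callback
         * is NULL, and the *_nocalls variant avoids invoking callbacks for
         * CPUs that are already online at registration time. For dynamic
         * states the call returns the allocated state number (>= 0).
         */
        ret = cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN, "example:dead",
                                        NULL, example_dead);
        return ret < 0 ? ret : 0;
}
early_initcall(example_init);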