author		Nicholas Piggin <npiggin@gmail.com>	2019-04-09 05:34:03 -0400
committer	Ingo Molnar <mingo@kernel.org>		2019-04-18 08:07:52 -0400
commit		471ba0e686cb13752bc1ff3216c54b69a2d250ea
tree		23268643553d24fd331f93c268e1aacc5b14a1df
parent		2d65c42b43e53d61f1fd6b8d0a097451a4cffa24
irq_work: Do not raise an IPI when queueing work on the local CPU
The QEMU PowerPC/PSeries machine model was not expecting a self-IPI, and it is
a somewhat surprising thing to do in any case, so have irq_work_queue_on() do
local queueing when the target is the current CPU.
Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Cédric Le Goater <clg@kaod.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20190409093403.20994-1-npiggin@gmail.com
[ Simplified the preprocessor comments.
Fixed unbalanced curly brackets pointed out by Thomas. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	kernel/irq_work.c	75
1 file changed, 42 insertions(+), 33 deletions(-)
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 6b7cdf17ccf8..73288914ed5e 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -56,61 +56,70 @@ void __weak arch_irq_work_raise(void)
 	 */
 }
 
-/*
- * Enqueue the irq_work @work on @cpu unless it's already pending
- * somewhere.
- *
- * Can be re-enqueued while the callback is still in progress.
- */
-bool irq_work_queue_on(struct irq_work *work, int cpu)
+/* Enqueue on current CPU, work must already be claimed and preempt disabled */
+static void __irq_work_queue_local(struct irq_work *work)
 {
-	/* All work should have been flushed before going offline */
-	WARN_ON_ONCE(cpu_is_offline(cpu));
-
-#ifdef CONFIG_SMP
-
-	/* Arch remote IPI send/receive backend aren't NMI safe */
-	WARN_ON_ONCE(in_nmi());
+	/* If the work is "lazy", handle it from next tick if any */
+	if (work->flags & IRQ_WORK_LAZY) {
+		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
+		    tick_nohz_tick_stopped())
+			arch_irq_work_raise();
+	} else {
+		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+			arch_irq_work_raise();
+	}
+}
 
+/* Enqueue the irq work @work on the current CPU */
+bool irq_work_queue(struct irq_work *work)
+{
 	/* Only queue if not already pending */
 	if (!irq_work_claim(work))
 		return false;
 
-	if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
-		arch_send_call_function_single_ipi(cpu);
-
-#else /* #ifdef CONFIG_SMP */
-	irq_work_queue(work);
-#endif /* #else #ifdef CONFIG_SMP */
+	/* Queue the entry and raise the IPI if needed. */
+	preempt_disable();
+	__irq_work_queue_local(work);
+	preempt_enable();
 
 	return true;
 }
+EXPORT_SYMBOL_GPL(irq_work_queue);
 
-/* Enqueue the irq work @work on the current CPU */
-bool irq_work_queue(struct irq_work *work)
+/*
+ * Enqueue the irq_work @work on @cpu unless it's already pending
+ * somewhere.
+ *
+ * Can be re-enqueued while the callback is still in progress.
+ */
+bool irq_work_queue_on(struct irq_work *work, int cpu)
 {
+#ifndef CONFIG_SMP
+	return irq_work_queue(work);
+
+#else /* CONFIG_SMP: */
+	/* All work should have been flushed before going offline */
+	WARN_ON_ONCE(cpu_is_offline(cpu));
+
 	/* Only queue if not already pending */
 	if (!irq_work_claim(work))
 		return false;
 
-	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
-
-	/* If the work is "lazy", handle it from next tick if any */
-	if (work->flags & IRQ_WORK_LAZY) {
-		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
-		    tick_nohz_tick_stopped())
-			arch_irq_work_raise();
+	if (cpu != smp_processor_id()) {
+		/* Arch remote IPI send/receive backend aren't NMI safe */
+		WARN_ON_ONCE(in_nmi());
+		if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+			arch_send_call_function_single_ipi(cpu);
 	} else {
-		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
-			arch_irq_work_raise();
+		__irq_work_queue_local(work);
 	}
-
 	preempt_enable();
 
 	return true;
+#endif /* CONFIG_SMP */
 }
-EXPORT_SYMBOL_GPL(irq_work_queue);
+
 
 bool irq_work_needs_cpu(void)
 {
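
For reference, a minimal sketch of how a built-in caller might exercise the reworked
queueing path. This is illustrative only: the example_* names are hypothetical and not
part of the patch, and irq_work_queue_on() is not exported to modules at this point, so
the sketch assumes code built into the kernel.

/* Hypothetical, illustrative-only caller; no symbol below is part of the patch. */
#include <linux/init.h>
#include <linux/irq_work.h>
#include <linux/printk.h>
#include <linux/smp.h>

static void example_irq_work_fn(struct irq_work *work)
{
	/* Runs in hard interrupt context (irq_work interrupt or the tick). */
	pr_info("irq_work ran on CPU %d\n", smp_processor_id());
}

static struct irq_work example_work;

static int __init example_irq_work_init(void)
{
	int cpu;

	/* One-time init; must not be redone while the work may be pending. */
	init_irq_work(&example_work, example_irq_work_fn);

	/*
	 * Queue on the local CPU. After this patch the call below takes the
	 * __irq_work_queue_local() path and raises the work with
	 * arch_irq_work_raise(); arch_send_call_function_single_ipi() is
	 * only used when the target is a remote CPU.
	 */
	cpu = get_cpu();
	irq_work_queue_on(&example_work, cpu);
	put_cpu();

	return 0;
}
core_initcall(example_irq_work_init);

The self-IPI that confused the QEMU PowerPC/PSeries model is avoided precisely because
the local-CPU case never reaches arch_send_call_function_single_ipi() any more.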