author    Thomas Gleixner <tglx@linutronix.de>  2018-06-04 11:33:59 -0400
committer Thomas Gleixner <tglx@linutronix.de>  2018-06-06 09:18:22 -0400
commit    12f47073a40f6aa75119d8f5df4077b7f334cced (patch)
tree      397b8e5071285d9d6fc4c824049ab50e66032c0e /kernel/irq/manage.c
parent    839b0f1c4ef674cd929a42304c078afca278581a (diff)
genirq/affinity: Defer affinity setting if irq chip is busy
The case that interrupt affinity setting fails with -EBUSY can be handled completely in the kernel by using the already available generic pending infrastructure.

If an irq_chip::set_affinity() call fails with -EBUSY, handle it like the interrupts for which irq_chip::set_affinity() can only be invoked from interrupt context: copy the new affinity mask to irq_desc::pending_mask and set the affinity pending bit. The next interrupt raised for the affected irq will check the pending bit and try to set the new affinity from the handler. This avoids returning -EBUSY to user space when an affinity change is requested while the previous change has not been cleaned up yet. The new affinity takes effect when the next interrupt is raised from the device.

Fixes: dccfe3147b42 ("x86/vector: Simplify vector move cleanup")
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Song Liu <songliubraving@fb.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Song Liu <liu.song.a23@gmail.com>
Cc: Dmitry Safonov <0x7f454c46@gmail.com>
Cc: stable@vger.kernel.org
Cc: Mike Travis <mike.travis@hpe.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Tariq Toukan <tariqt@mellanox.com>
Link: https://lkml.kernel.org/r/20180604162224.819273597@linutronix.de
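For context, a condensed sketch of the consuming side, i.e. how the pending bit armed by the new irq_set_affinity_pending() is acted on from the next interrupt. It is loosely modeled on irq_move_masked_irq() in kernel/irq/migration.c; the helper name apply_pending_affinity is hypothetical, it assumes CONFIG_GENERIC_PENDING_IRQ and kernel context, and locking, masking and error handling are omitted:

	/*
	 * Sketch only: how the deferred affinity update is applied on the
	 * next interrupt. Loosely modeled on irq_move_masked_irq() in
	 * kernel/irq/migration.c; not the actual implementation.
	 */
	static void apply_pending_affinity(struct irq_data *data)
	{
		struct irq_desc *desc = irq_data_to_desc(data);

		/* Nothing to do unless irq_set_affinity_pending() armed the bit. */
		if (!irqd_is_setaffinity_pending(data))
			return;

		irqd_clr_move_pending(data);

		/* Retry the change with the mask stashed in pending_mask. */
		if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
			irq_do_set_affinity(data, desc->pending_mask, false);

		cpumask_clear(desc->pending_mask);
	}

With this in place, the -EBUSY case and the interrupts that can only be retargeted from interrupt context share the same deferred retry path.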
Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--	kernel/irq/manage.c	37
1 file changed, 35 insertions(+), 2 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index e3336d904f64..facfecfc543c 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -204,6 +204,39 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	return ret;
 }
 
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+static inline int irq_set_affinity_pending(struct irq_data *data,
+					   const struct cpumask *dest)
+{
+	struct irq_desc *desc = irq_data_to_desc(data);
+
+	irqd_set_move_pending(data);
+	irq_copy_pending(desc, dest);
+	return 0;
+}
+#else
+static inline int irq_set_affinity_pending(struct irq_data *data,
+					   const struct cpumask *dest)
+{
+	return -EBUSY;
+}
+#endif
+
+static int irq_try_set_affinity(struct irq_data *data,
+				const struct cpumask *dest, bool force)
+{
+	int ret = irq_do_set_affinity(data, dest, force);
+
+	/*
+	 * In case that the underlying vector management is busy and the
+	 * architecture supports the generic pending mechanism then utilize
+	 * this to avoid returning an error to user space.
+	 */
+	if (ret == -EBUSY && !force)
+		ret = irq_set_affinity_pending(data, dest);
+	return ret;
+}
+
 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
 			    bool force)
 {
@@ -214,8 +247,8 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
 	if (!chip || !chip->irq_set_affinity)
 		return -EINVAL;
 
-	if (irq_can_move_pcntxt(data)) {
-		ret = irq_do_set_affinity(data, mask, force);
+	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
+		ret = irq_try_set_affinity(data, mask, force);
 	} else {
 		irqd_set_move_pending(data);
 		irq_copy_pending(desc, mask);
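The user-visible effect can be exercised through /proc/irq/<N>/smp_affinity. A minimal user-space sketch, assuming irq 24 exists and the caller has the privileges to retarget it; before this commit the write() below could fail with EBUSY while a previous affinity change was still being cleaned up, whereas with the pending mechanism it succeeds and takes effect on the next interrupt:

	/*
	 * Illustration only: the irq number (24) and the mask value are
	 * assumptions, not part of the patch.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *mask = "2\n";	/* move the irq to CPU 1 */
		int fd = open("/proc/irq/24/smp_affinity", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, mask, strlen(mask)) < 0)
			perror("write");	/* could report EBUSY before this fix */
		close(fd);
		return 0;
	}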