Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--  kernel/irq/manage.c | 37 +++++++++++++++++++++++++++++++++--
1 file changed, 35 insertions(+), 2 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 4c2ef8084e32..daeabd791d58 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -205,6 +205,39 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	return ret;
 }
 
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+static inline int irq_set_affinity_pending(struct irq_data *data,
+					   const struct cpumask *dest)
+{
+	struct irq_desc *desc = irq_data_to_desc(data);
+
+	irqd_set_move_pending(data);
+	irq_copy_pending(desc, dest);
+	return 0;
+}
+#else
+static inline int irq_set_affinity_pending(struct irq_data *data,
+					   const struct cpumask *dest)
+{
+	return -EBUSY;
+}
+#endif
+
+static int irq_try_set_affinity(struct irq_data *data,
+				const struct cpumask *dest, bool force)
+{
+	int ret = irq_do_set_affinity(data, dest, force);
+
+	/*
+	 * In case that the underlying vector management is busy and the
+	 * architecture supports the generic pending mechanism then utilize
+	 * this to avoid returning an error to user space.
+	 */
+	if (ret == -EBUSY && !force)
+		ret = irq_set_affinity_pending(data, dest);
+	return ret;
+}
+
 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
 			    bool force)
 {
@@ -215,8 +248,8 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
 	if (!chip || !chip->irq_set_affinity)
 		return -EINVAL;
 
-	if (irq_can_move_pcntxt(data)) {
-		ret = irq_do_set_affinity(data, mask, force);
+	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
+		ret = irq_try_set_affinity(data, mask, force);
 	} else {
 		irqd_set_move_pending(data);
 		irq_copy_pending(desc, mask);
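
For illustration only, not part of the commit: a minimal userspace C sketch of the fallback pattern the diff introduces. The names here (do_set_affinity, try_set_affinity, the busy flag and the pending variables) are hypothetical stand-ins for irq_do_set_affinity(), irq_try_set_affinity() and the CONFIG_GENERIC_PENDING_IRQ machinery; the point is that a non-forced affinity update which hits -EBUSY is recorded as pending instead of the error being returned to the caller.

/* Hypothetical userspace sketch, not kernel code. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool move_pending;          /* stand-in for irqd_set_move_pending() */
static unsigned int pending_mask;  /* stand-in for irq_copy_pending()      */

/* Simulated low-level affinity update; returns -EBUSY while the "vector
 * management" is busy, mirroring what irq_do_set_affinity() may report. */
static int do_set_affinity(unsigned int mask, bool busy)
{
	if (busy)
		return -EBUSY;
	printf("applied affinity mask %#x immediately\n", mask);
	return 0;
}

static int try_set_affinity(unsigned int mask, bool force, bool busy)
{
	int ret = do_set_affinity(mask, busy);

	/* Defer only when the caller did not force the update. */
	if (ret == -EBUSY && !force) {
		move_pending = true;
		pending_mask = mask;
		printf("recorded affinity mask %#x as pending\n", mask);
		ret = 0;
	}
	return ret;
}

int main(void)
{
	try_set_affinity(0x3, false, false);  /* not busy: applied directly      */
	try_set_affinity(0xc, false, true);   /* busy, not forced: deferred      */
	try_set_affinity(0xc, true, true);    /* busy and forced: -EBUSY returns */
	return move_pending && pending_mask == 0xc ? 0 : 1;
}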