author		Thomas Gleixner <tglx@linutronix.de>	2016-07-04 04:39:23 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2016-07-04 06:25:13 -0400
commit		9c2555835bb3d34dfac52a0be943dcc4bedd650f (patch)
tree		1a5a265c2f5c61b478bb1ed5df4ce9d8d15889e7
parent		b6140914fd079e43ea75a53429b47128584f033a (diff)
genirq: Introduce IRQD_AFFINITY_MANAGED flag
Interrupts marked with this flag are excluded from user space interrupt
affinity changes. Unlike with the IRQ_NO_BALANCING flag, the kernel-internal
affinity mechanism is not blocked.
This flag will be used for multi-queue device interrupts.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: linux-block@vger.kernel.org
Cc: linux-pci@vger.kernel.org
Cc: linux-nvme@lists.infradead.org
Cc: axboe@fb.com
Cc: agordeev@redhat.com
Link: http://lkml.kernel.org/r/1467621574-8277-3-git-send-email-hch@lst.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
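
For illustration only, a minimal sketch of the distinction described in the
commit message. irqd_can_balance() is an existing helper in <linux/irq.h>,
irqd_affinity_is_managed() is the helper added by this patch; the two wrapper
functions below are hypothetical and exist only to contrast the kernel-internal
and user-space paths (they ignore the chip/descriptor checks the real code
also performs):

/* Kernel-internal affinity changes: only IRQD_NO_BALANCING/IRQD_PER_CPU block them. */
static bool kernel_may_move_irq(struct irq_data *d)
{
	return irqd_can_balance(d);
}

/* User-space writes via /proc additionally respect the managed flag. */
static bool userspace_may_move_irq(struct irq_data *d)
{
	return irqd_can_balance(d) && !irqd_affinity_is_managed(d);
}

Note that this patch only introduces the query side; nothing in it sets
IRQD_AFFINITY_MANAGED yet.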
-rw-r--r--	include/linux/irq.h	|  7
-rw-r--r--	kernel/irq/internals.h	|  2
-rw-r--r--	kernel/irq/manage.c	| 21
-rw-r--r--	kernel/irq/proc.c	|  2
4 files changed, 28 insertions, 4 deletions
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 4d758a7c604a..f6074813688d 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -197,6 +197,7 @@ struct irq_data {
  * IRQD_IRQ_INPROGRESS		- In progress state of the interrupt
  * IRQD_WAKEUP_ARMED		- Wakeup mode armed
  * IRQD_FORWARDED_TO_VCPU	- The interrupt is forwarded to a VCPU
+ * IRQD_AFFINITY_MANAGED	- Affinity is auto-managed by the kernel
  */
 enum {
 	IRQD_TRIGGER_MASK		= 0xf,
@@ -212,6 +213,7 @@ enum {
 	IRQD_IRQ_INPROGRESS		= (1 << 18),
 	IRQD_WAKEUP_ARMED		= (1 << 19),
 	IRQD_FORWARDED_TO_VCPU		= (1 << 20),
+	IRQD_AFFINITY_MANAGED		= (1 << 21),
 };
 
 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -305,6 +307,11 @@ static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
 	__irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
 }
 
+static inline bool irqd_affinity_is_managed(struct irq_data *d)
+{
+	return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
+}
+
 #undef __irqd_to_state
 
 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 09be2c903c6d..b15aa3b617a2 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -105,6 +105,8 @@ static inline void unregister_handler_proc(unsigned int irq,
 					    struct irqaction *action) { }
 #endif
 
+extern bool irq_can_set_affinity_usr(unsigned int irq);
+
 extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
 
 extern void irq_set_thread_affinity(struct irq_desc *desc);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index ef0bc02c3a70..30658e9827f0 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -115,12 +115,12 @@ EXPORT_SYMBOL(synchronize_irq);
 #ifdef CONFIG_SMP
 cpumask_var_t irq_default_affinity;
 
-static int __irq_can_set_affinity(struct irq_desc *desc)
+static bool __irq_can_set_affinity(struct irq_desc *desc)
 {
 	if (!desc || !irqd_can_balance(&desc->irq_data) ||
 	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
-		return 0;
-	return 1;
+		return false;
+	return true;
 }
 
 /**
@@ -134,6 +134,21 @@ int irq_can_set_affinity(unsigned int irq)
 }
 
 /**
+ * irq_can_set_affinity_usr - Check if affinity of a irq can be set from user space
+ * @irq:	Interrupt to check
+ *
+ * Like irq_can_set_affinity() above, but additionally checks for the
+ * AFFINITY_MANAGED flag.
+ */
+bool irq_can_set_affinity_usr(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	return __irq_can_set_affinity(desc) &&
+		!irqd_affinity_is_managed(&desc->irq_data);
+}
+
+/**
  * irq_set_thread_affinity - Notify irq threads to adjust affinity
  * @desc:	irq descriptor which has affitnity changed
  *
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 4e1b94726818..40bdcdc1f700 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -96,7 +96,7 @@ static ssize_t write_irq_affinity(int type, struct file *file,
 	cpumask_var_t new_value;
 	int err;
 
-	if (!irq_can_set_affinity(irq) || no_irq_affinity)
+	if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
 		return -EIO;
 
 	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
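
The user-visible effect of the proc.c change is that a write to
/proc/irq/<n>/smp_affinity for an affinity-managed interrupt now fails with
EIO instead of being applied. A minimal user-space sketch, assuming IRQ 42 is
such a managed multi-queue interrupt (the number is only a placeholder):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/proc/irq/42/smp_affinity";
	const char *mask = "2\n";	/* request CPU 1 only */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* For a managed IRQ this write is expected to fail with EIO. */
	if (write(fd, mask, strlen(mask)) < 0)
		fprintf(stderr, "write: %s\n", strerror(errno));
	close(fd);
	return 0;
}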