author	Jiang Liu <liuj97@gmail.com>	2012-03-30 11:11:34 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2012-05-24 16:36:40 -0400
commit	818b0f3bfb236ae66cac3ff38e86b9e47f24b7aa
tree	711e4396d69f1127cd723b00ad348c47bd433a2b /kernel/irq
parent	23812b9d9e497580d38c62ebdc6f308733b0a32a
genirq: Introduce irq_do_set_affinity() to reduce duplicated code
All invocations of chip->irq_set_affinity() are doing the same return
value checks. Let them all use a common function.

[ tglx: removed the silly likely while at it ]

Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Cc: Jiang Liu <liuj97@gmail.com>
Cc: Keping Chen <chenkeping@huawei.com>
Link: http://lkml.kernel.org/r/1333120296-13563-3-git-send-email-jiang.liu@huawei.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/irq')
 kernel/irq/internals.h |  3 +++
 kernel/irq/manage.c    | 39 ++++++++++++++++++++-------------------
 kernel/irq/migration.c | 13 ++-----------
 3 files changed, 27 insertions(+), 28 deletions(-)
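
Before the diff itself, the pattern being consolidated can be shown in isolation. Below is a minimal userspace mock: the enum names and the deliberate switch fall-through are modeled on the kernel code in the diff, but the mask type, fake_chip_set_affinity() and the helper name are stand-ins, not the real kernel API.

#include <stdio.h>

/* Stand-ins for the kernel's chip callback return codes (illustrative). */
enum {
	IRQ_SET_MASK_OK,	/* core must copy the new mask itself */
	IRQ_SET_MASK_OK_NOCOPY,	/* chip already recorded the new mask */
};

struct cpumask { unsigned long bits; };	/* toy one-word mask */

static void cpumask_copy(struct cpumask *dst, const struct cpumask *src)
{
	dst->bits = src->bits;
}

static void set_thread_affinity(void)	/* stand-in side effect */
{
	puts("  irq thread affinity updated");
}

/* Hypothetical chip callback; a real chip returns one of the codes
 * above or a negative errno. */
static int fake_chip_set_affinity(const struct cpumask *mask)
{
	return mask->bits & 1 ? IRQ_SET_MASK_OK : IRQ_SET_MASK_OK_NOCOPY;
}

/*
 * Shape of the consolidated helper: IRQ_SET_MASK_OK deliberately falls
 * through into IRQ_SET_MASK_OK_NOCOPY, so the mask copy happens only
 * for OK, while the thread-affinity update and ret = 0 happen for both
 * success codes; anything else matches neither case and passes through.
 */
static int do_set_affinity(struct cpumask *affinity, const struct cpumask *mask)
{
	int ret = fake_chip_set_affinity(mask);

	switch (ret) {
	case IRQ_SET_MASK_OK:
		cpumask_copy(affinity, mask);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		set_thread_affinity();
		ret = 0;
	}
	return ret;
}

int main(void)
{
	struct cpumask affinity = { 0 };
	struct cpumask copied = { .bits = 0x5 };	/* chip asks core to copy */
	struct cpumask nocopy = { .bits = 0x2 };	/* chip recorded it itself */

	printf("ret=%d\n", do_set_affinity(&affinity, &copied));
	printf("ret=%d\n", do_set_affinity(&affinity, &nocopy));
	printf("stored mask: 0x%lx (still the first one)\n", affinity.bits);
	return 0;
}

Before this patch, the switch body above was repeated nearly verbatim at three call sites: __irq_set_affinity_locked(), setup_affinity() and irq_move_masked_irq(). The diff below replaces each copy with a single call to the new helper.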
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 8e5c56b3b7d9..001fa5bab490 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -101,6 +101,9 @@ extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
 
 extern void irq_set_thread_affinity(struct irq_desc *desc);
 
+extern int irq_do_set_affinity(struct irq_data *data,
+			       const struct cpumask *dest, bool force);
+
 /* Inline functions for support of irq chips on slow busses */
 static inline void chip_bus_lock(struct irq_desc *desc)
 {
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index bb32326afe87..a1b903380bcf 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -139,6 +139,25 @@ static inline void
 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
 #endif
 
+int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
+			bool force)
+{
+	struct irq_desc *desc = irq_data_to_desc(data);
+	struct irq_chip *chip = irq_data_get_irq_chip(data);
+	int ret;
+
+	ret = chip->irq_set_affinity(data, mask, false);
+	switch (ret) {
+	case IRQ_SET_MASK_OK:
+		cpumask_copy(data->affinity, mask);
+	case IRQ_SET_MASK_OK_NOCOPY:
+		irq_set_thread_affinity(desc);
+		ret = 0;
+	}
+
+	return ret;
+}
+
 int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 {
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
@@ -149,14 +168,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 		return -EINVAL;
 
 	if (irq_can_move_pcntxt(data)) {
-		ret = chip->irq_set_affinity(data, mask, false);
-		switch (ret) {
-		case IRQ_SET_MASK_OK:
-			cpumask_copy(data->affinity, mask);
-		case IRQ_SET_MASK_OK_NOCOPY:
-			irq_set_thread_affinity(desc);
-			ret = 0;
-		}
+		ret = irq_do_set_affinity(data, mask, false);
 	} else {
 		irqd_set_move_pending(data);
 		irq_copy_pending(desc, mask);
@@ -280,9 +292,8 @@ EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
 static int
 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
-	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct cpumask *set = irq_default_affinity;
-	int ret, node = desc->irq_data.node;
+	int node = desc->irq_data.node;
 
 	/* Excludes PER_CPU and NO_BALANCE interrupts */
 	if (!irq_can_set_affinity(irq))
@@ -308,13 +319,7 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 		if (cpumask_intersects(mask, nodemask))
 			cpumask_and(mask, mask, nodemask);
 	}
-	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
-	switch (ret) {
-	case IRQ_SET_MASK_OK:
-		cpumask_copy(desc->irq_data.affinity, mask);
-	case IRQ_SET_MASK_OK_NOCOPY:
-		irq_set_thread_affinity(desc);
-	}
+	irq_do_set_affinity(&desc->irq_data, mask, false);
 	return 0;
 }
 #else
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index c3c89751b327..ca3f4aaff707 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -42,17 +42,8 @@ void irq_move_masked_irq(struct irq_data *idata)
 	 * For correct operation this depends on the caller
 	 * masking the irqs.
 	 */
-	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
-		   < nr_cpu_ids)) {
-		int ret = chip->irq_set_affinity(&desc->irq_data,
-						 desc->pending_mask, false);
-		switch (ret) {
-		case IRQ_SET_MASK_OK:
-			cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
-		case IRQ_SET_MASK_OK_NOCOPY:
-			irq_set_thread_affinity(desc);
-		}
-	}
+	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
+		irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);
 
 	cpumask_clear(desc->pending_mask);
 }
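
One detail worth noting in the consolidated helper: a negative error code from chip->irq_set_affinity() matches neither case label and is returned unchanged. Of the three converted call sites, only __irq_set_affinity_locked() propagates that value; setup_affinity() and irq_move_masked_irq() discard it, matching their pre-patch behavior. Any future call site inside kernel/irq/ reduces to the same one-liner; a sketch of a hypothetical user follows (the function name and locking assumption are illustrative, not part of the patch):

/* Hypothetical call site (sketch only; assumes desc->lock is held,
 * as it is at the three sites converted above). */
static int example_apply_affinity(struct irq_desc *desc,
				  const struct cpumask *mask)
{
	return irq_do_set_affinity(&desc->irq_data, mask, false);
}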