Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--	kernel/irq/manage.c	72
1 file changed, 55 insertions, 17 deletions
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 46d6611a33bb..3cfc0fefb5ee 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -17,6 +17,8 @@
 
 #ifdef CONFIG_SMP
 
+cpumask_t irq_default_affinity = CPU_MASK_ALL;
+
 /**
  * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  * @irq: interrupt number to wait for
@@ -95,6 +97,27 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
 	return 0;
 }
 
+#ifndef CONFIG_AUTO_IRQ_AFFINITY
+/*
+ * Generic version of the affinity autoselector.
+ */
+int irq_select_affinity(unsigned int irq)
+{
+	cpumask_t mask;
+
+	if (!irq_can_set_affinity(irq))
+		return 0;
+
+	cpus_and(mask, cpu_online_map, irq_default_affinity);
+
+	irq_desc[irq].affinity = mask;
+	irq_desc[irq].chip->set_affinity(irq, mask);
+
+	set_balance_irq_affinity(irq, mask);
+	return 0;
+}
+#endif
+
 #endif
 
 /**
@@ -194,6 +217,17 @@ void enable_irq(unsigned int irq)
 }
 EXPORT_SYMBOL(enable_irq);
 
+int set_irq_wake_real(unsigned int irq, unsigned int on)
+{
+	struct irq_desc *desc = irq_desc + irq;
+	int ret = -ENXIO;
+
+	if (desc->chip->set_wake)
+		ret = desc->chip->set_wake(irq, on);
+
+	return ret;
+}
+
 /**
  * set_irq_wake - control irq power management wakeup
  * @irq: interrupt to control
@@ -210,30 +244,34 @@ int set_irq_wake(unsigned int irq, unsigned int on)
 {
 	struct irq_desc *desc = irq_desc + irq;
 	unsigned long flags;
-	int ret = -ENXIO;
-	int (*set_wake)(unsigned, unsigned) = desc->chip->set_wake;
+	int ret = 0;
 
 	/* wakeup-capable irqs can be shared between drivers that
 	 * don't need to have the same sleep mode behaviors.
 	 */
 	spin_lock_irqsave(&desc->lock, flags);
 	if (on) {
-		if (desc->wake_depth++ == 0)
-			desc->status |= IRQ_WAKEUP;
-		else
-			set_wake = NULL;
+		if (desc->wake_depth++ == 0) {
+			ret = set_irq_wake_real(irq, on);
+			if (ret)
+				desc->wake_depth = 0;
+			else
+				desc->status |= IRQ_WAKEUP;
+		}
 	} else {
 		if (desc->wake_depth == 0) {
 			printk(KERN_WARNING "Unbalanced IRQ %d "
 				"wake disable\n", irq);
 			WARN_ON(1);
-		} else if (--desc->wake_depth == 0)
-			desc->status &= ~IRQ_WAKEUP;
-		else
-			set_wake = NULL;
+		} else if (--desc->wake_depth == 0) {
+			ret = set_irq_wake_real(irq, on);
+			if (ret)
+				desc->wake_depth = 1;
+			else
+				desc->status &= ~IRQ_WAKEUP;
+		}
 	}
-	if (set_wake)
-		ret = desc->chip->set_wake(irq, on);
+
 	spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
@@ -354,7 +392,7 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 
 	/* Setup the type (level, edge polarity) if configured: */
 	if (new->flags & IRQF_TRIGGER_MASK) {
-		if (desc->chip && desc->chip->set_type)
+		if (desc->chip->set_type)
 			desc->chip->set_type(irq,
 					new->flags & IRQF_TRIGGER_MASK);
 		else
@@ -364,8 +402,7 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 			 */
 			printk(KERN_WARNING "No IRQF_TRIGGER set_type "
 			       "function for IRQ %d (%s)\n", irq,
-			       desc->chip ? desc->chip->name :
-			       "unknown");
+			       desc->chip->name);
 	} else
 		compat_irq_chip_set_default_handler(desc);
 
@@ -382,6 +419,9 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 		} else
 			/* Undo nested disables: */
 			desc->depth = 1;
+
+		/* Set default affinity mask once everything is setup */
+		irq_select_affinity(irq);
 	}
 	/* Reset broken irq detection when installing new handler */
 	desc->irq_count = 0;
@@ -571,8 +611,6 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 	action->next = NULL;
 	action->dev_id = dev_id;
 
-	select_smp_affinity(irq);
-
 #ifdef CONFIG_DEBUG_SHIRQ
 	if (irqflags & IRQF_SHARED) {
 		/*