author    Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
commit    ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree      644b88f8a71896307d71438e9b3af49126ffb22b /kernel/irq/manage.c
parent    43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent    3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--  kernel/irq/manage.c  84
1 file changed, 58 insertions(+), 26 deletions(-)
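
Most of this diff is a mechanical rename of the desc->lock calls from the spin_lock_*() API to the raw_spin_lock_*() API, following the upstream switch of irq_desc.lock from spinlock_t to raw_spinlock_t: under PREEMPT_RT, plain spinlocks can be converted into sleeping locks, while raw spinlocks always busy-wait, which the low-level IRQ paths require. As a rough sketch of the two flavors (the struct and function names below are illustrative, not taken from this file):

	#include <linux/spinlock.h>

	struct demo {
		spinlock_t lock;	/* can become a sleeping lock on -rt */
		raw_spinlock_t rlock;	/* always a true busy-waiting lock */
	};

	static void demo_locking(struct demo *d)
	{
		unsigned long flags;

		spin_lock_irqsave(&d->lock, flags);		/* old calls */
		spin_unlock_irqrestore(&d->lock, flags);

		raw_spin_lock_irqsave(&d->rlock, flags);	/* new calls */
		raw_spin_unlock_irqrestore(&d->rlock, flags);
	}

The matching initializers are spin_lock_init() and raw_spin_lock_init().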
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index bde4c667d24d..704e488730a5 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -46,9 +46,9 @@ void synchronize_irq(unsigned int irq)
 			cpu_relax();
 
 		/* Ok, that indicated we're done: double-check carefully. */
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
 		status = desc->status;
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 		/* Oops, that failed? */
 	} while (status & IRQ_INPROGRESS);
@@ -114,7 +114,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 	if (!desc->chip->set_affinity)
 		return -EINVAL;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PCNTXT) {
@@ -134,7 +134,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 	}
 #endif
 	desc->status |= IRQ_AFFINITY_SET;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
 }
 
@@ -181,11 +181,11 @@ int irq_select_affinity_usr(unsigned int irq)
 	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	ret = setup_affinity(irq, desc);
 	if (!ret)
 		irq_set_thread_affinity(desc);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 	return ret;
 }
@@ -231,9 +231,9 @@ void disable_irq_nosync(unsigned int irq)
 		return;
 
 	chip_bus_lock(irq, desc);
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	__disable_irq(desc, irq, false);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	chip_bus_sync_unlock(irq, desc);
 }
 EXPORT_SYMBOL(disable_irq_nosync);
@@ -308,9 +308,9 @@ void enable_irq(unsigned int irq)
 		return;
 
 	chip_bus_lock(irq, desc);
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	__enable_irq(desc, irq, false);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	chip_bus_sync_unlock(irq, desc);
 }
 EXPORT_SYMBOL(enable_irq);
@@ -347,7 +347,7 @@ int set_irq_wake(unsigned int irq, unsigned int on)
 	/* wakeup-capable irqs can be shared between drivers that
 	 * don't need to have the same sleep mode behaviors.
 	 */
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	if (on) {
 		if (desc->wake_depth++ == 0) {
 			ret = set_irq_wake_real(irq, on);
@@ -368,7 +368,7 @@ int set_irq_wake(unsigned int irq, unsigned int on)
 		}
 	}
 
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(set_irq_wake);
@@ -382,6 +382,7 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irqaction *action;
+	unsigned long flags;
 
 	if (!desc)
 		return 0;
@@ -389,11 +390,14 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
 	if (desc->status & IRQ_NOREQUEST)
 		return 0;
 
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	action = desc->action;
 	if (action)
 		if (irqflags & action->flags & IRQF_SHARED)
 			action = NULL;
 
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
 	return !action;
 }
 
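
The can_request_irq() hunk above is more than a rename: it also closes a small race, since desc->action was previously read without holding desc->lock and could change under a concurrent setup_irq()/free_irq(). A minimal sketch of the locked-snapshot pattern, with hypothetical names:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct entry;

	struct registry {
		raw_spinlock_t lock;	/* protects head */
		struct entry *head;	/* modified under lock by add/remove */
	};

	static bool registry_is_free(struct registry *r)
	{
		unsigned long flags;
		bool free;

		/* Snapshot under the writers' lock; an unlocked read could
		 * race with a concurrent add/remove. */
		raw_spin_lock_irqsave(&r->lock, flags);
		free = (r->head == NULL);
		raw_spin_unlock_irqrestore(&r->lock, flags);

		return free;
	}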
@@ -483,13 +487,31 @@ static int irq_wait_for_interrupt(struct irqaction *action)
  */
 static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
 {
+again:
 	chip_bus_lock(irq, desc);
-	spin_lock_irq(&desc->lock);
+	raw_spin_lock_irq(&desc->lock);
+
+	/*
+	 * Implausible though it may be we need to protect us against
+	 * the following scenario:
+	 *
+	 * The thread is faster done than the hard interrupt handler
+	 * on the other CPU. If we unmask the irq line then the
+	 * interrupt can come in again and masks the line, leaves due
+	 * to IRQ_INPROGRESS and the irq line is masked forever.
+	 */
+	if (unlikely(desc->status & IRQ_INPROGRESS)) {
+		raw_spin_unlock_irq(&desc->lock);
+		chip_bus_sync_unlock(irq, desc);
+		cpu_relax();
+		goto again;
+	}
+
 	if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
 		desc->status &= ~IRQ_MASKED;
 		desc->chip->unmask(irq);
 	}
-	spin_unlock_irq(&desc->lock);
+	raw_spin_unlock_irq(&desc->lock);
 	chip_bus_sync_unlock(irq, desc);
 }
 
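
The again:/goto loop added to irq_finalize_oneshot() handles the race described in its comment by backing out of both locks and retrying, rather than spinning with the chip bus lock held (which could keep the hard handler from ever clearing IRQ_INPROGRESS). A generic sketch of the drop-everything-and-retry idiom, assuming a hypothetical two-level locking scheme:

	#include <linux/mutex.h>
	#include <linux/sched.h>	/* cpu_relax() */
	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct state {
		struct mutex bus;	/* outer lock, may sleep (cf. chip_bus_lock) */
		raw_spinlock_t lock;	/* inner raw spinlock (cf. desc->lock) */
		bool in_progress;	/* cf. IRQ_INPROGRESS */
	};

	static void finalize_when_idle(struct state *s)
	{
	again:
		mutex_lock(&s->bus);
		raw_spin_lock_irq(&s->lock);

		if (s->in_progress) {
			/* Back out of *both* locks so the other CPU can
			 * finish and clear the flag, then try again. */
			raw_spin_unlock_irq(&s->lock);
			mutex_unlock(&s->bus);
			cpu_relax();
			goto again;
		}

		/* ... safe to finalize here ... */

		raw_spin_unlock_irq(&s->lock);
		mutex_unlock(&s->bus);
	}

Releasing the outer lock before cpu_relax() is the crucial detail; retrying with it held could deadlock against the path that clears the busy flag.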
@@ -514,9 +536,9 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
 		return;
 	}
 
-	spin_lock_irq(&desc->lock);
+	raw_spin_lock_irq(&desc->lock);
 	cpumask_copy(mask, desc->affinity);
-	spin_unlock_irq(&desc->lock);
+	raw_spin_unlock_irq(&desc->lock);
 
 	set_cpus_allowed_ptr(current, mask);
 	free_cpumask_var(mask);
@@ -545,7 +567,7 @@ static int irq_thread(void *data)
 
 		atomic_inc(&desc->threads_active);
 
-		spin_lock_irq(&desc->lock);
+		raw_spin_lock_irq(&desc->lock);
 		if (unlikely(desc->status & IRQ_DISABLED)) {
 			/*
 			 * CHECKME: We might need a dedicated
@@ -555,9 +577,9 @@ static int irq_thread(void *data)
 			 * retriggers the interrupt itself --- tglx
 			 */
 			desc->status |= IRQ_PENDING;
-			spin_unlock_irq(&desc->lock);
+			raw_spin_unlock_irq(&desc->lock);
 		} else {
-			spin_unlock_irq(&desc->lock);
+			raw_spin_unlock_irq(&desc->lock);
 
 			action->thread_fn(action->irq, action->dev_id);
 
@@ -679,7 +701,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	/*
 	 * The following block of code has to be executed atomically
 	 */
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	old_ptr = &desc->action;
 	old = *old_ptr;
 	if (old) {
@@ -735,6 +757,16 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		if (new->flags & IRQF_ONESHOT)
 			desc->status |= IRQ_ONESHOT;
 
+		/*
+		 * Force MSI interrupts to run with interrupts
+		 * disabled. The multi vector cards can cause stack
+		 * overflows due to nested interrupts when enough of
+		 * them are directed to a core and fire at the same
+		 * time.
+		 */
+		if (desc->msi_desc)
+			new->flags |= IRQF_DISABLED;
+
 		if (!(desc->status & IRQ_NOAUTOEN)) {
 			desc->depth = 0;
 			desc->status &= ~IRQ_DISABLED;
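
The MSI hunk above shows core code tightening the caller's flags: any action installed on an interrupt backed by an MSI descriptor gets IRQF_DISABLED forced on, so its handler runs with local interrupts off regardless of what the driver asked for. A hypothetical driver registration for context (handler and device names are made up):

	#include <linux/interrupt.h>

	static irqreturn_t demo_msi_handler(int irq, void *dev_id)
	{
		/* For an MSI vector this runs with local interrupts
		 * disabled even though IRQF_DISABLED is not passed below:
		 * __setup_irq() forces the flag when desc->msi_desc is set. */
		return IRQ_HANDLED;
	}

	static int demo_attach_msi(unsigned int irq, void *dev)
	{
		return request_irq(irq, demo_msi_handler, 0, "demo-msi", dev);
	}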
@@ -775,7 +807,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		__enable_irq(desc, irq, false);
 	}
 
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 	/*
 	 * Strictly no need to wake it up, but hung_task complains
@@ -802,7 +834,7 @@ mismatch:
 	ret = -EBUSY;
 
 out_thread:
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	if (new->thread) {
 		struct task_struct *t = new->thread;
 
@@ -844,7 +876,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 	if (!desc)
 		return NULL;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 
 	/*
 	 * There can be multiple actions per IRQ descriptor, find the right
@@ -856,7 +888,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 
 	if (!action) {
 		WARN(1, "Trying to free already-free IRQ %d\n", irq);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 		return NULL;
 	}
@@ -884,7 +916,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 		desc->chip->disable(irq);
 	}
 
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 	unregister_handler_proc(irq, action);
 
@@ -1067,7 +1099,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	kfree(action);
 
 #ifdef CONFIG_DEBUG_SHIRQ
-	if (irqflags & IRQF_SHARED) {
+	if (!retval && (irqflags & IRQF_SHARED)) {
 		/*
 		 * It's a shared IRQ -- the driver ought to be prepared for it
 		 * to happen immediately, so let's make sure....
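
The final hunk fixes the CONFIG_DEBUG_SHIRQ self-test: the fake probe interrupt, which verifies that a shared handler tolerates firing immediately, is now delivered only when registration succeeded (!retval) instead of also on the error path. For context, a hypothetical shared, threaded registration of the kind this test exercises (all names illustrative):

	#include <linux/interrupt.h>

	static irqreturn_t demo_quick_check(int irq, void *dev_id)
	{
		/* Hard-irq part: with IRQF_SHARED it must tolerate being
		 * called as soon as it is registered, because the line can
		 * fire for the other device at any time. */
		return IRQ_WAKE_THREAD;
	}

	static irqreturn_t demo_thread_fn(int irq, void *dev_id)
	{
		/* Threaded part, runs in process context. */
		return IRQ_HANDLED;
	}

	static int demo_attach(unsigned int irq, void *dev)
	{
		return request_threaded_irq(irq, demo_quick_check,
					    demo_thread_fn, IRQF_SHARED,
					    "demo-shared", dev);
	}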