author     Thomas Gleixner <tglx@linutronix.de>               2010-02-17 21:22:24 -0500
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2010-02-18 22:52:32 -0500
commit     f95e085b2531c86262b97a081eb0d1cf793606d3
tree       2f7c6cdd19695c8362fe788f5e3e746d4360d4af  /arch/powerpc
parent     087d8c7d0cc8a79e6bd6223f9b0018483124e769
powerpc: Convert big_irq_lock to raw_spinlock
big_irq_lock needs to be a real spinlock in RT. Convert it to raw_spinlock.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
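Background: on PREEMPT_RT an ordinary spinlock_t is turned into a sleeping lock, while a raw_spinlock_t keeps true busy-wait semantics; irq_big_lock is taken in low-level IRQ mapping paths that cannot sleep, which is why it has to stay a real (raw) spinlock. The snippet below is only a minimal sketch of the lock definition and lock/unlock pattern the diff applies, not an excerpt of arch/powerpc/kernel/irq.c; the helper function name is made up for illustration.

#include <linux/spinlock.h>

/* A raw spinlock stays a spinning lock even on PREEMPT_RT. */
static DEFINE_RAW_SPINLOCK(irq_big_lock);

/* Hypothetical helper showing the pattern used throughout the diff. */
static void touch_irq_mappings(void)
{
        unsigned long flags;

        /* Disable local interrupts and spin until the lock is held. */
        raw_spin_lock_irqsave(&irq_big_lock, flags);
        /* ... update irq_map[] / irq_hosts under the lock ... */
        raw_spin_unlock_irqrestore(&irq_big_lock, flags);
}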
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kernel/irq.c | 22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 9ae77e52f9d3..64f6f2031c22 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -520,7 +520,7 @@ void do_softirq(void)
  */
 
 static LIST_HEAD(irq_hosts);
-static DEFINE_SPINLOCK(irq_big_lock);
+static DEFINE_RAW_SPINLOCK(irq_big_lock);
 static unsigned int revmap_trees_allocated;
 static DEFINE_MUTEX(revmap_trees_mutex);
 struct irq_map_entry irq_map[NR_IRQS];
@@ -566,14 +566,14 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
         if (host->ops->match == NULL)
                 host->ops->match = default_irq_host_match;
 
-        spin_lock_irqsave(&irq_big_lock, flags);
+        raw_spin_lock_irqsave(&irq_big_lock, flags);
 
         /* If it's a legacy controller, check for duplicates and
          * mark it as allocated (we use irq 0 host pointer for that
          */
         if (revmap_type == IRQ_HOST_MAP_LEGACY) {
                 if (irq_map[0].host != NULL) {
-                        spin_unlock_irqrestore(&irq_big_lock, flags);
+                        raw_spin_unlock_irqrestore(&irq_big_lock, flags);
                         /* If we are early boot, we can't free the structure,
                          * too bad...
                          * this will be fixed once slab is made available early
@@ -587,7 +587,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
         }
 
         list_add(&host->link, &irq_hosts);
-        spin_unlock_irqrestore(&irq_big_lock, flags);
+        raw_spin_unlock_irqrestore(&irq_big_lock, flags);
 
         /* Additional setups per revmap type */
         switch(revmap_type) {
@@ -638,13 +638,13 @@ struct irq_host *irq_find_host(struct device_node *node)
          * the absence of a device node. This isn't a problem so far
          * yet though...
          */
-        spin_lock_irqsave(&irq_big_lock, flags);
+        raw_spin_lock_irqsave(&irq_big_lock, flags);
         list_for_each_entry(h, &irq_hosts, link)
                 if (h->ops->match(h, node)) {
                         found = h;
                         break;
                 }
-        spin_unlock_irqrestore(&irq_big_lock, flags);
+        raw_spin_unlock_irqrestore(&irq_big_lock, flags);
         return found;
 }
 EXPORT_SYMBOL_GPL(irq_find_host);
@@ -1013,7 +1013,7 @@ unsigned int irq_alloc_virt(struct irq_host *host,
         if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
                 return NO_IRQ;
 
-        spin_lock_irqsave(&irq_big_lock, flags);
+        raw_spin_lock_irqsave(&irq_big_lock, flags);
 
         /* Use hint for 1 interrupt if any */
         if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
@@ -1037,7 +1037,7 @@ unsigned int irq_alloc_virt(struct irq_host *host,
                 }
         }
         if (found == NO_IRQ) {
-                spin_unlock_irqrestore(&irq_big_lock, flags);
+                raw_spin_unlock_irqrestore(&irq_big_lock, flags);
                 return NO_IRQ;
         }
  hint_found:
@@ -1046,7 +1046,7 @@ unsigned int irq_alloc_virt(struct irq_host *host,
                 smp_wmb();
                 irq_map[i].host = host;
         }
-        spin_unlock_irqrestore(&irq_big_lock, flags);
+        raw_spin_unlock_irqrestore(&irq_big_lock, flags);
         return found;
 }
 
@@ -1058,7 +1058,7 @@ void irq_free_virt(unsigned int virq, unsigned int count)
         WARN_ON (virq < NUM_ISA_INTERRUPTS);
         WARN_ON (count == 0 || (virq + count) > irq_virq_count);
 
-        spin_lock_irqsave(&irq_big_lock, flags);
+        raw_spin_lock_irqsave(&irq_big_lock, flags);
         for (i = virq; i < (virq + count); i++) {
                 struct irq_host *host;
 
@@ -1071,7 +1071,7 @@ void irq_free_virt(unsigned int virq, unsigned int count)
                 smp_wmb();
                 irq_map[i].host = NULL;
         }
-        spin_unlock_irqrestore(&irq_big_lock, flags);
+        raw_spin_unlock_irqrestore(&irq_big_lock, flags);
 }
 
 int arch_early_irq_init(void)