diff options
Diffstat (limited to 'arch/powerpc/kernel/irq.c')
-rw-r--r-- | arch/powerpc/kernel/irq.c | 76 |
1 file changed, 11 insertions, 65 deletions
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 2656924415da..ac222d0ab12e 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
@@ -439,9 +439,8 @@ void do_softirq(void) | |||
439 | 439 | ||
440 | static LIST_HEAD(irq_hosts); | 440 | static LIST_HEAD(irq_hosts); |
441 | static DEFINE_SPINLOCK(irq_big_lock); | 441 | static DEFINE_SPINLOCK(irq_big_lock); |
442 | static DEFINE_PER_CPU(unsigned int, irq_radix_reader); | ||
443 | static unsigned int irq_radix_writer; | ||
444 | static unsigned int revmap_trees_allocated; | 442 | static unsigned int revmap_trees_allocated; |
443 | static DEFINE_MUTEX(revmap_trees_mutex); | ||
445 | struct irq_map_entry irq_map[NR_IRQS]; | 444 | struct irq_map_entry irq_map[NR_IRQS]; |
446 | static unsigned int irq_virq_count = NR_IRQS; | 445 | static unsigned int irq_virq_count = NR_IRQS; |
447 | static struct irq_host *irq_default_host; | 446 | static struct irq_host *irq_default_host; |
@@ -584,57 +583,6 @@ void irq_set_virq_count(unsigned int count) | |||
584 | irq_virq_count = count; | 583 | irq_virq_count = count; |
585 | } | 584 | } |
586 | 585 | ||
587 | /* radix tree not lockless safe ! we use a brlock-type mechanism | ||
588 | * for now, until we can use a lockless radix tree | ||
589 | */ | ||
590 | static void irq_radix_wrlock(unsigned long *flags) | ||
591 | { | ||
592 | unsigned int cpu, ok; | ||
593 | |||
594 | spin_lock_irqsave(&irq_big_lock, *flags); | ||
595 | irq_radix_writer = 1; | ||
596 | smp_mb(); | ||
597 | do { | ||
598 | barrier(); | ||
599 | ok = 1; | ||
600 | for_each_possible_cpu(cpu) { | ||
601 | if (per_cpu(irq_radix_reader, cpu)) { | ||
602 | ok = 0; | ||
603 | break; | ||
604 | } | ||
605 | } | ||
606 | if (!ok) | ||
607 | cpu_relax(); | ||
608 | } while(!ok); | ||
609 | } | ||
610 | |||
611 | static void irq_radix_wrunlock(unsigned long flags) | ||
612 | { | ||
613 | smp_wmb(); | ||
614 | irq_radix_writer = 0; | ||
615 | spin_unlock_irqrestore(&irq_big_lock, flags); | ||
616 | } | ||
617 | |||
618 | static void irq_radix_rdlock(unsigned long *flags) | ||
619 | { | ||
620 | local_irq_save(*flags); | ||
621 | __get_cpu_var(irq_radix_reader) = 1; | ||
622 | smp_mb(); | ||
623 | if (likely(irq_radix_writer == 0)) | ||
624 | return; | ||
625 | __get_cpu_var(irq_radix_reader) = 0; | ||
626 | smp_wmb(); | ||
627 | spin_lock(&irq_big_lock); | ||
628 | __get_cpu_var(irq_radix_reader) = 1; | ||
629 | spin_unlock(&irq_big_lock); | ||
630 | } | ||
631 | |||
632 | static void irq_radix_rdunlock(unsigned long flags) | ||
633 | { | ||
634 | __get_cpu_var(irq_radix_reader) = 0; | ||
635 | local_irq_restore(flags); | ||
636 | } | ||
637 | |||
638 | static int irq_setup_virq(struct irq_host *host, unsigned int virq, | 586 | static int irq_setup_virq(struct irq_host *host, unsigned int virq, |
639 | irq_hw_number_t hwirq) | 587 | irq_hw_number_t hwirq) |
640 | { | 588 | { |
@@ -789,7 +737,6 @@ void irq_dispose_mapping(unsigned int virq) | |||
789 | { | 737 | { |
790 | struct irq_host *host; | 738 | struct irq_host *host; |
791 | irq_hw_number_t hwirq; | 739 | irq_hw_number_t hwirq; |
792 | unsigned long flags; | ||
793 | 740 | ||
794 | if (virq == NO_IRQ) | 741 | if (virq == NO_IRQ) |
795 | return; | 742 | return; |
@@ -829,9 +776,9 @@ void irq_dispose_mapping(unsigned int virq) | |||
829 | smp_rmb(); | 776 | smp_rmb(); |
830 | if (revmap_trees_allocated < 1) | 777 | if (revmap_trees_allocated < 1) |
831 | break; | 778 | break; |
832 | irq_radix_wrlock(&flags); | 779 | mutex_lock(&revmap_trees_mutex); |
833 | radix_tree_delete(&host->revmap_data.tree, hwirq); | 780 | radix_tree_delete(&host->revmap_data.tree, hwirq); |
834 | irq_radix_wrunlock(flags); | 781 | mutex_unlock(&revmap_trees_mutex); |
835 | break; | 782 | break; |
836 | } | 783 | } |
837 | 784 | ||
@@ -885,7 +832,6 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host, | |||
885 | { | 832 | { |
886 | struct irq_map_entry *ptr; | 833 | struct irq_map_entry *ptr; |
887 | unsigned int virq; | 834 | unsigned int virq; |
888 | unsigned long flags; | ||
889 | 835 | ||
890 | WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); | 836 | WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); |
891 | 837 | ||
@@ -897,9 +843,11 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host, | |||
897 | return irq_find_mapping(host, hwirq); | 843 | return irq_find_mapping(host, hwirq); |
898 | 844 | ||
899 | /* Now try to resolve */ | 845 | /* Now try to resolve */ |
900 | irq_radix_rdlock(&flags); | 846 | /* |
847 | * No rcu_read_lock(ing) needed, the ptr returned can't go under us | ||
848 | * as it's referencing an entry in the static irq_map table. | ||
849 | */ | ||
901 | ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq); | 850 | ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq); |
902 | irq_radix_rdunlock(flags); | ||
903 | 851 | ||
904 | /* | 852 | /* |
905 | * If found in radix tree, then fine. | 853 | * If found in radix tree, then fine. |
@@ -917,7 +865,6 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host, | |||
917 | void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, | 865 | void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, |
918 | irq_hw_number_t hwirq) | 866 | irq_hw_number_t hwirq) |
919 | { | 867 | { |
920 | unsigned long flags; | ||
921 | 868 | ||
922 | WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); | 869 | WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); |
923 | 870 | ||
@@ -931,10 +878,10 @@ void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, | |||
931 | return; | 878 | return; |
932 | 879 | ||
933 | if (virq != NO_IRQ) { | 880 | if (virq != NO_IRQ) { |
934 | irq_radix_wrlock(&flags); | 881 | mutex_lock(&revmap_trees_mutex); |
935 | radix_tree_insert(&host->revmap_data.tree, hwirq, | 882 | radix_tree_insert(&host->revmap_data.tree, hwirq, |
936 | &irq_map[virq]); | 883 | &irq_map[virq]); |
937 | irq_radix_wrunlock(flags); | 884 | mutex_unlock(&revmap_trees_mutex); |
938 | } | 885 | } |
939 | } | 886 | } |
940 | 887 | ||
@@ -1044,7 +991,6 @@ void irq_early_init(void) | |||
1044 | static int irq_late_init(void) | 991 | static int irq_late_init(void) |
1045 | { | 992 | { |
1046 | struct irq_host *h; | 993 | struct irq_host *h; |
1047 | unsigned long flags; | ||
1048 | unsigned int i; | 994 | unsigned int i; |
1049 | 995 | ||
1050 | /* | 996 | /* |
@@ -1068,14 +1014,14 @@ static int irq_late_init(void) | |||
1068 | * Insert the reverse mapping for those interrupts already present | 1014 | * Insert the reverse mapping for those interrupts already present |
1069 | * in irq_map[]. | 1015 | * in irq_map[]. |
1070 | */ | 1016 | */ |
1071 | irq_radix_wrlock(&flags); | 1017 | mutex_lock(&revmap_trees_mutex); |
1072 | for (i = 0; i < irq_virq_count; i++) { | 1018 | for (i = 0; i < irq_virq_count; i++) { |
1073 | if (irq_map[i].host && | 1019 | if (irq_map[i].host && |
1074 | (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE)) | 1020 | (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE)) |
1075 | radix_tree_insert(&irq_map[i].host->revmap_data.tree, | 1021 | radix_tree_insert(&irq_map[i].host->revmap_data.tree, |
1076 | irq_map[i].hwirq, &irq_map[i]); | 1022 | irq_map[i].hwirq, &irq_map[i]); |
1077 | } | 1023 | } |
1078 | irq_radix_wrunlock(flags); | 1024 | mutex_unlock(&revmap_trees_mutex); |
1079 | 1025 | ||
1080 | /* | 1026 | /* |
1081 | * Make sure the radix trees insertions are visible before setting | 1027 | * Make sure the radix trees insertions are visible before setting |