Diffstat (limited to 'arch/powerpc/kernel/irq.c')
-rw-r--r--	arch/powerpc/kernel/irq.c	169
1 files changed, 85 insertions, 84 deletions
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index d972decf0324..ac222d0ab12e 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -439,8 +439,8 @@ void do_softirq(void)
 
 static LIST_HEAD(irq_hosts);
 static DEFINE_SPINLOCK(irq_big_lock);
-static DEFINE_PER_CPU(unsigned int, irq_radix_reader);
-static unsigned int irq_radix_writer;
+static unsigned int revmap_trees_allocated;
+static DEFINE_MUTEX(revmap_trees_mutex);
 struct irq_map_entry irq_map[NR_IRQS];
 static unsigned int irq_virq_count = NR_IRQS;
 static struct irq_host *irq_default_host;
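The per-CPU reader flag and writer flag are replaced by a plain state counter plus a mutex: writers serialize on revmap_trees_mutex, while readers only need to observe revmap_trees_allocated reaching 1 (trees initialized) or 2 (trees fully populated). A minimal userspace sketch of this publish-then-read pattern, with C11 release/acquire standing in for the patch's smp_wmb()/smp_rmb() pairing (all names and values here are illustrative, not kernel code):

```c
#include <stdatomic.h>
#include <stdio.h>

static int tree_data;			/* stands in for the radix trees */
static atomic_uint trees_allocated;	/* like revmap_trees_allocated: 0 -> 1 -> 2 */

static void publish(void)
{
	tree_data = 42;			/* "initialize the trees" */
	/* release: tree init is visible before the flag flips */
	atomic_store_explicit(&trees_allocated, 1, memory_order_release);
}

static int lookup(void)
{
	/* acquire: pairs with the release store in publish() */
	if (atomic_load_explicit(&trees_allocated, memory_order_acquire) < 1)
		return -1;		/* not ready: take the slow path */
	return tree_data;		/* ordered after the flag load */
}

int main(void)
{
	printf("%d\n", lookup());	/* -1 before publication */
	publish();
	printf("%d\n", lookup());	/* 42 afterwards */
	return 0;
}
```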
@@ -583,57 +583,6 @@ void irq_set_virq_count(unsigned int count)
 	irq_virq_count = count;
 }
 
-/* radix tree not lockless safe ! we use a brlock-type mecanism
- * for now, until we can use a lockless radix tree
- */
-static void irq_radix_wrlock(unsigned long *flags)
-{
-	unsigned int cpu, ok;
-
-	spin_lock_irqsave(&irq_big_lock, *flags);
-	irq_radix_writer = 1;
-	smp_mb();
-	do {
-		barrier();
-		ok = 1;
-		for_each_possible_cpu(cpu) {
-			if (per_cpu(irq_radix_reader, cpu)) {
-				ok = 0;
-				break;
-			}
-		}
-		if (!ok)
-			cpu_relax();
-	} while(!ok);
-}
-
-static void irq_radix_wrunlock(unsigned long flags)
-{
-	smp_wmb();
-	irq_radix_writer = 0;
-	spin_unlock_irqrestore(&irq_big_lock, flags);
-}
-
-static void irq_radix_rdlock(unsigned long *flags)
-{
-	local_irq_save(*flags);
-	__get_cpu_var(irq_radix_reader) = 1;
-	smp_mb();
-	if (likely(irq_radix_writer == 0))
-		return;
-	__get_cpu_var(irq_radix_reader) = 0;
-	smp_wmb();
-	spin_lock(&irq_big_lock);
-	__get_cpu_var(irq_radix_reader) = 1;
-	spin_unlock(&irq_big_lock);
-}
-
-static void irq_radix_rdunlock(unsigned long flags)
-{
-	__get_cpu_var(irq_radix_reader) = 0;
-	local_irq_restore(flags);
-}
-
 static int irq_setup_virq(struct irq_host *host, unsigned int virq,
 			  irq_hw_number_t hwirq)
 {
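For reference, the deleted helpers implement a brlock-style scheme: each reader raises a per-CPU flag, and the writer, holding irq_big_lock, spins until every reader flag drops. A rough userspace analogue using pthreads and C11 atomics (my construction; the kernel version also disables local interrupts on the read side, omitted here):

```c
#include <stdatomic.h>
#include <pthread.h>
#include <sched.h>

#define MAX_READERS 4

static atomic_int reader_active[MAX_READERS];	/* one flag per reader ("CPU") */
static atomic_int writer_active;
static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

static void rdlock(int id)
{
	atomic_store(&reader_active[id], 1);
	if (atomic_load(&writer_active) == 0)
		return;				/* fast path: no writer around */
	/* Slow path: back off and queue behind the writer. */
	atomic_store(&reader_active[id], 0);
	pthread_mutex_lock(&big_lock);
	atomic_store(&reader_active[id], 1);
	pthread_mutex_unlock(&big_lock);
}

static void rdunlock(int id)
{
	atomic_store(&reader_active[id], 0);
}

static void wrlock(void)
{
	pthread_mutex_lock(&big_lock);
	atomic_store(&writer_active, 1);
	for (int id = 0; id < MAX_READERS; id++)
		while (atomic_load(&reader_active[id]))
			sched_yield();		/* spin until all readers drain */
}

static void wrunlock(void)
{
	atomic_store(&writer_active, 0);
	pthread_mutex_unlock(&big_lock);
}
```

The writer-side drain is what makes this expensive: every tree update spins over all possible CPUs. The patch can drop it because, as the new lookup code notes, lookups need no read-side lock at all once the tree entries point into static storage.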
@@ -788,7 +737,6 @@ void irq_dispose_mapping(unsigned int virq)
 {
 	struct irq_host *host;
 	irq_hw_number_t hwirq;
-	unsigned long flags;
 
 	if (virq == NO_IRQ)
 		return;
@@ -821,12 +769,16 @@ void irq_dispose_mapping(unsigned int virq)
 		host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
 		break;
 	case IRQ_HOST_MAP_TREE:
-		/* Check if radix tree allocated yet */
-		if (host->revmap_data.tree.gfp_mask == 0)
+		/*
+		 * Check if radix tree allocated yet, if not then nothing to
+		 * remove.
+		 */
+		smp_rmb();
+		if (revmap_trees_allocated < 1)
 			break;
-		irq_radix_wrlock(&flags);
+		mutex_lock(&revmap_trees_mutex);
 		radix_tree_delete(&host->revmap_data.tree, hwirq);
-		irq_radix_wrunlock(flags);
+		mutex_unlock(&revmap_trees_mutex);
 		break;
 	}
 
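Disposal follows the new discipline: the smp_rmb() plus the revmap_trees_allocated check skips hosts whose trees were never set up, and the actual tree mutation happens under the writers' mutex. A hedged userspace sketch of that guard-then-lock shape (names mine):

```c
#include <stdatomic.h>
#include <pthread.h>

static atomic_uint trees_allocated;
static pthread_mutex_t trees_mutex = PTHREAD_MUTEX_INITIALIZER;

static void tree_dispose(unsigned long hwirq)
{
	/* acquire pairs with the release store in the init path */
	if (atomic_load_explicit(&trees_allocated, memory_order_acquire) < 1)
		return;			/* no tree yet, nothing to remove */

	pthread_mutex_lock(&trees_mutex);	/* writers exclude each other only */
	/* ... radix_tree_delete(&host->revmap_data.tree, hwirq) goes here ... */
	pthread_mutex_unlock(&trees_mutex);
	(void)hwirq;
}
```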
@@ -875,43 +827,62 @@ unsigned int irq_find_mapping(struct irq_host *host,
 EXPORT_SYMBOL_GPL(irq_find_mapping);
 
 
-unsigned int irq_radix_revmap(struct irq_host *host,
-			      irq_hw_number_t hwirq)
+unsigned int irq_radix_revmap_lookup(struct irq_host *host,
+				     irq_hw_number_t hwirq)
 {
-	struct radix_tree_root *tree;
 	struct irq_map_entry *ptr;
 	unsigned int virq;
-	unsigned long flags;
 
 	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
 
-	/* Check if the radix tree exist yet. We test the value of
-	 * the gfp_mask for that. Sneaky but saves another int in the
-	 * structure. If not, we fallback to slow mode
+	/*
+	 * Check if the radix tree exists and has been initialized.
+	 * If not, we fall back to slow mode
 	 */
-	tree = &host->revmap_data.tree;
-	if (tree->gfp_mask == 0)
+	if (revmap_trees_allocated < 2)
 		return irq_find_mapping(host, hwirq);
 
 	/* Now try to resolve */
-	irq_radix_rdlock(&flags);
-	ptr = radix_tree_lookup(tree, hwirq);
-	irq_radix_rdunlock(flags);
+	/*
+	 * No rcu_read_lock(ing) needed, the ptr returned can't go under us
+	 * as it's referencing an entry in the static irq_map table.
+	 */
+	ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
 
-	/* Found it, return */
-	if (ptr) {
+	/*
+	 * If found in radix tree, then fine.
+	 * Else fall back to linear lookup - this should not happen in practice
+	 * as it means that we failed to insert the node in the radix tree.
+	 */
+	if (ptr)
 		virq = ptr - irq_map;
-		return virq;
-	}
+	else
+		virq = irq_find_mapping(host, hwirq);
+
+	return virq;
+}
+
+void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
+			     irq_hw_number_t hwirq)
+{
+
+	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);
+
+	/*
+	 * Check if the radix tree exists yet.
+	 * If not, then the irq will be inserted into the tree when it gets
+	 * initialized.
+	 */
+	smp_rmb();
+	if (revmap_trees_allocated < 1)
+		return;
 
-	/* If not there, try to insert it */
-	virq = irq_find_mapping(host, hwirq);
 	if (virq != NO_IRQ) {
-		irq_radix_wrlock(&flags);
-		radix_tree_insert(tree, hwirq, &irq_map[virq]);
-		irq_radix_wrunlock(flags);
+		mutex_lock(&revmap_trees_mutex);
+		radix_tree_insert(&host->revmap_data.tree, hwirq,
+				  &irq_map[virq]);
+		mutex_unlock(&revmap_trees_mutex);
 	}
-	return virq;
 }
 
 unsigned int irq_linear_revmap(struct irq_host *host,
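The old irq_radix_revmap() both looked up and opportunistically inserted; it is now split into a lookup and an insert path. Lookup can be lock-free because the tree stores pointers into the static irq_map[] array, which is never freed, so virq is recovered by plain pointer subtraction. A toy illustration of that trick (a fixed array stands in for the radix tree; all names are illustrative, and 0 plays the role of NO_IRQ):

```c
#include <stdio.h>

struct irq_map_entry { unsigned long hwirq; };

#define NR_IRQS 32
static struct irq_map_entry irq_map[NR_IRQS];	/* static table, never freed */
static struct irq_map_entry *revmap[64];	/* stand-in for the radix tree */

/* Insert (writer side; would hold revmap_trees_mutex in the patch). */
static void revmap_insert(unsigned long hwirq, unsigned int virq)
{
	irq_map[virq].hwirq = hwirq;
	revmap[hwirq] = &irq_map[virq];
}

/* Lookup (reader side): no lock needed, entries point into irq_map[]. */
static unsigned int revmap_lookup(unsigned long hwirq)
{
	struct irq_map_entry *ptr = revmap[hwirq];

	return ptr ? (unsigned int)(ptr - irq_map) : 0;	/* 0 ~ NO_IRQ */
}

int main(void)
{
	revmap_insert(40, 16);
	printf("hwirq 40 -> virq %u\n", revmap_lookup(40));	/* 16 */
	return 0;
}
```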
@@ -1020,14 +991,44 @@ void irq_early_init(void)
 static int irq_late_init(void)
 {
 	struct irq_host *h;
-	unsigned long flags;
+	unsigned int i;
 
-	irq_radix_wrlock(&flags);
+	/*
+	 * No mutual exclusion with respect to accessors of the tree is needed
+	 * here as the synchronization is done via the state variable
+	 * revmap_trees_allocated.
+	 */
 	list_for_each_entry(h, &irq_hosts, link) {
 		if (h->revmap_type == IRQ_HOST_MAP_TREE)
-			INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC);
+			INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
+	}
+
+	/*
+	 * Make sure the radix tree inits are visible before setting
+	 * the flag.
+	 */
+	smp_wmb();
+	revmap_trees_allocated = 1;
+
+	/*
+	 * Insert the reverse mapping for those interrupts already present
+	 * in irq_map[].
+	 */
+	mutex_lock(&revmap_trees_mutex);
+	for (i = 0; i < irq_virq_count; i++) {
+		if (irq_map[i].host &&
+		    (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
+			radix_tree_insert(&irq_map[i].host->revmap_data.tree,
+					  irq_map[i].hwirq, &irq_map[i]);
 	}
-	irq_radix_wrunlock(flags);
+	mutex_unlock(&revmap_trees_mutex);
+
+	/*
+	 * Make sure the radix tree insertions are visible before setting
+	 * the flag.
+	 */
+	smp_wmb();
+	revmap_trees_allocated = 2;
 
 	return 0;
 }
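irq_late_init() now brings the trees up in two published phases: after phase 1 (flag = 1) the trees exist, so writers may insert and delete; after phase 2 (flag = 2) they are fully backfilled from irq_map[], so lock-free lookups may trust them instead of falling back to irq_find_mapping(). A compact sketch of that sequencing, with C11 release stores standing in for the smp_wmb() plus plain store pairs (everything here is illustrative):

```c
#include <stdatomic.h>
#include <pthread.h>

static atomic_uint trees_allocated;		/* 0 -> 1 -> 2 */
static pthread_mutex_t trees_mutex = PTHREAD_MUTEX_INITIALIZER;
static int tree[8];				/* stand-in for the radix trees */

static void late_init(void)
{
	/* Phase 1: bare init; no accessors to exclude before publication. */
	for (int i = 0; i < 8; i++)
		tree[i] = 0;
	atomic_store_explicit(&trees_allocated, 1, memory_order_release);
	/* inserts/deletes may run from here on */

	/* Phase 2: backfill pre-existing mappings, racing with other writers. */
	pthread_mutex_lock(&trees_mutex);
	tree[3] = 42;				/* "insert existing irq_map[] entries" */
	pthread_mutex_unlock(&trees_mutex);
	atomic_store_explicit(&trees_allocated, 2, memory_order_release);
	/* lock-free lookups may trust the tree from here on */
}

int main(void)
{
	late_init();
	return 0;
}
```

The two levels exist because an empty-but-initialized tree is safe to modify but not yet authoritative for lookups; only after the backfill can a miss in the tree be treated as a genuine miss rather than a not-yet-inserted entry.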