author     Benjamin Herrenschmidt <benh@kernel.crashing.org>  2006-08-27 21:17:37 -0400
committer  Paul Mackerras <paulus@samba.org>  2006-08-29 20:36:16 -0400
commit     8ec8f2e85c6b88b4a1641eb3902275bcf2c6d60a (patch)
tree       ff4d7eacba8c61f9950cbb36b0844ca53546316b /arch/powerpc/kernel/irq.c
parent     4b3afca9345f5beb9c607faeb2aef4f91dd91a13 (diff)
[POWERPC] Fix performance regression in IRQ radix tree locking
When reworking the powerpc irq code, I figured out that we were using
the radix tree in a racy way. As a temporary fix, I put a spinlock in
there. However, this can have a significant impact on performance. This
patch reworks that to use a smarter technique based on the fact that
what we need is in effect an rwlock with extremely rare writers (and is
thus optimized for the read path).
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/kernel/irq.c')
 -rw-r--r--  arch/powerpc/kernel/irq.c | 84
 1 file changed, 66 insertions(+), 18 deletions(-)
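Before the diff itself, a sketch of how the scheme works may help. A reader only sets its own per-CPU flag; a writer takes irq_big_lock, raises a global flag, and spins until every per-CPU reader flag has drained, so the read path never writes a shared cache line. Below is a minimal user-space analogue of that idea as I read it from the patch. It is a sketch only: C11 seq_cst atomics stand in for the kernel's smp_mb()/smp_wmb(), a pthread mutex for irq_big_lock, one flag per thread for the per-CPU flag, and all names in it (brlock_t, br_read_lock() and so on) are invented for this illustration, not taken from the patch. A small driver for it appears after the diff.

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>

#define NREADERS 8			/* stands in for the number of CPUs */

typedef struct {
	atomic_int reader[NREADERS];	/* per-thread, like per-CPU irq_radix_reader */
	atomic_int writer;		/* like irq_radix_writer */
	pthread_mutex_t big_lock;	/* like irq_big_lock */
} brlock_t;

/* Read lock: announce ourselves, then look for a writer. The common case
 * writes only our own flag, which is the whole point of the scheme. */
static void br_read_lock(brlock_t *b, int self)
{
	atomic_store(&b->reader[self], 1);
	/* seq_cst ordering plays the role of the kernel's smp_mb() here:
	 * either we see the writer's flag, or the writer sees ours. */
	if (atomic_load(&b->writer) == 0)
		return;				/* fast path: no writer */

	/* Slow path: withdraw the claim so the writer can finish, queue
	 * behind it on the big lock, then become a reader again. */
	atomic_store(&b->reader[self], 0);
	pthread_mutex_lock(&b->big_lock);
	atomic_store(&b->reader[self], 1);
	pthread_mutex_unlock(&b->big_lock);
}

static void br_read_unlock(brlock_t *b, int self)
{
	atomic_store(&b->reader[self], 0);
}

/* Write lock: serialize writers (and slow-path readers) on the big lock,
 * raise the writer flag, then spin until every reader flag has drained. */
static void br_write_lock(brlock_t *b)
{
	pthread_mutex_lock(&b->big_lock);
	atomic_store(&b->writer, 1);
	for (;;) {
		int busy = 0;
		for (int i = 0; i < NREADERS; i++)
			busy |= atomic_load(&b->reader[i]);
		if (!busy)
			break;
		sched_yield();			/* stands in for cpu_relax() */
	}
}

static void br_write_unlock(brlock_t *b)
{
	atomic_store(&b->writer, 0);
	pthread_mutex_unlock(&b->big_lock);
}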
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 7ee685433319..12c5971d6565 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -322,7 +322,8 @@ EXPORT_SYMBOL(do_softirq);
 
 static LIST_HEAD(irq_hosts);
 static spinlock_t irq_big_lock = SPIN_LOCK_UNLOCKED;
-
+static DEFINE_PER_CPU(unsigned int, irq_radix_reader);
+static unsigned int irq_radix_writer;
 struct irq_map_entry irq_map[NR_IRQS];
 static unsigned int irq_virq_count = NR_IRQS;
 static struct irq_host *irq_default_host;
@@ -455,6 +456,58 @@ void irq_set_virq_count(unsigned int count)
 	irq_virq_count = count;
 }
 
+/* radix tree not lockless safe ! we use a brlock-type mecanism
+ * for now, until we can use a lockless radix tree
+ */
+static void irq_radix_wrlock(unsigned long *flags)
+{
+	unsigned int cpu, ok;
+
+	spin_lock_irqsave(&irq_big_lock, *flags);
+	irq_radix_writer = 1;
+	smp_mb();
+	do {
+		barrier();
+		ok = 1;
+		for_each_possible_cpu(cpu) {
+			if (per_cpu(irq_radix_reader, cpu)) {
+				ok = 0;
+				break;
+			}
+		}
+		if (!ok)
+			cpu_relax();
+	} while(!ok);
+}
+
+static void irq_radix_wrunlock(unsigned long flags)
+{
+	smp_wmb();
+	irq_radix_writer = 0;
+	spin_unlock_irqrestore(&irq_big_lock, flags);
+}
+
+static void irq_radix_rdlock(unsigned long *flags)
+{
+	local_irq_save(*flags);
+	__get_cpu_var(irq_radix_reader) = 1;
+	smp_mb();
+	if (likely(irq_radix_writer == 0))
+		return;
+	__get_cpu_var(irq_radix_reader) = 0;
+	smp_wmb();
+	spin_lock(&irq_big_lock);
+	__get_cpu_var(irq_radix_reader) = 1;
+	spin_unlock(&irq_big_lock);
+}
+
+static void irq_radix_rdunlock(unsigned long flags)
+{
+	__get_cpu_var(irq_radix_reader) = 0;
+	local_irq_restore(flags);
+}
+
+
 unsigned int irq_create_mapping(struct irq_host *host,
 				irq_hw_number_t hwirq)
 {
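The read-lock path is the subtle half of the scheme, so here it is once more with the intent of each step spelled out. The code is the patch's; the comments are annotation added here, not part of the patch.

static void irq_radix_rdlock(unsigned long *flags)
{
	/* With interrupts off we cannot be re-entered by an interrupt on
	 * this CPU, so the per-CPU flag is effectively private to us. */
	local_irq_save(*flags);
	/* Announce ourselves as a reader... */
	__get_cpu_var(irq_radix_reader) = 1;
	/* ...and make that visible before we look for a writer. Paired
	 * with the smp_mb() in irq_radix_wrlock(), this guarantees that
	 * either we see the writer's flag or the writer sees ours,
	 * never neither. */
	smp_mb();
	if (likely(irq_radix_writer == 0))
		return;		/* fast path: no writer, read lock held */
	/* Slow path: a writer is active. Withdraw our claim so its scan
	 * of the reader flags can complete... */
	__get_cpu_var(irq_radix_reader) = 0;
	smp_wmb();
	/* ...and queue behind it on irq_big_lock, which the writer holds
	 * for the whole write. Getting the lock means the writer is
	 * gone; raising our flag before dropping it means the next
	 * writer's scan is guaranteed to see us. */
	spin_lock(&irq_big_lock);
	__get_cpu_var(irq_radix_reader) = 1;
	spin_unlock(&irq_big_lock);
}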
@@ -604,13 +657,9 @@ void irq_dispose_mapping(unsigned int virq)
 		/* Check if radix tree allocated yet */
 		if (host->revmap_data.tree.gfp_mask == 0)
 			break;
-		/* XXX radix tree not safe ! remove lock whem it becomes safe
-		 * and use some RCU sync to make sure everything is ok before we
-		 * can re-use that map entry
-		 */
-		spin_lock_irqsave(&irq_big_lock, flags);
+		irq_radix_wrlock(&flags);
 		radix_tree_delete(&host->revmap_data.tree, hwirq);
-		spin_unlock_irqrestore(&irq_big_lock, flags);
+		irq_radix_wrunlock(flags);
 		break;
 	}
 
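This hunk also answers the removed XXX comment: the RCU-style synchronization it asked for is provided by the new write lock, since irq_radix_wrlock() does not return until no CPU is inside a lookup. Spelled out (annotation, not patch code):

	irq_radix_wrlock(&flags);	/* spins until no CPU holds the read lock */
	radix_tree_delete(&host->revmap_data.tree, hwirq);	/* may free tree nodes */
	irq_radix_wrunlock(flags);
	/* no CPU can have been walking the tree while its nodes were freed */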
@@ -677,25 +726,24 @@ unsigned int irq_radix_revmap(struct irq_host *host,
 	if (tree->gfp_mask == 0)
 		return irq_find_mapping(host, hwirq);
 
-	/* XXX Current radix trees are NOT SMP safe !!! Remove that lock
-	 * when that is fixed (when Nick's patch gets in
-	 */
-	spin_lock_irqsave(&irq_big_lock, flags);
-
 	/* Now try to resolve */
+	irq_radix_rdlock(&flags);
 	ptr = radix_tree_lookup(tree, hwirq);
+	irq_radix_rdunlock(flags);
+
 	/* Found it, return */
 	if (ptr) {
 		virq = ptr - irq_map;
-		goto bail;
+		return virq;
 	}
 
 	/* If not there, try to insert it */
 	virq = irq_find_mapping(host, hwirq);
-	if (virq != NO_IRQ)
+	if (virq != NO_IRQ) {
+		irq_radix_wrlock(&flags);
 		radix_tree_insert(tree, hwirq, &irq_map[virq]);
-bail:
-	spin_unlock_irqrestore(&irq_big_lock, flags);
+		irq_radix_wrunlock(flags);
+	}
 	return virq;
 }
 
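One consequence of the new scheme shows here: the read lock is dropped before the write lock is taken, so two CPUs can both miss the lookup and race to insert the same hwirq. Boiled down, with the unlocked window marked (an illustrative rearrangement, not code from the patch):

	irq_radix_rdlock(&flags);
	ptr = radix_tree_lookup(tree, hwirq);		/* read side only */
	irq_radix_rdunlock(flags);
	if (ptr)
		return ptr - irq_map;
	/* <-- window: another CPU may also miss here and race us to the insert */
	virq = irq_find_mapping(host, hwirq);
	if (virq != NO_IRQ) {
		irq_radix_wrlock(&flags);
		/* both racers insert the same &irq_map[virq] for this hwirq,
		 * and radix_tree_insert() returns -EEXIST (ignored here) for
		 * an occupied slot, so losing the race appears harmless */
		radix_tree_insert(tree, hwirq, &irq_map[virq]);
		irq_radix_wrunlock(flags);
	}
	return virq;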
@@ -806,12 +854,12 @@ static int irq_late_init(void)
 	struct irq_host *h;
 	unsigned long flags;
 
-	spin_lock_irqsave(&irq_big_lock, flags);
+	irq_radix_wrlock(&flags);
 	list_for_each_entry(h, &irq_hosts, link) {
 		if (h->revmap_type == IRQ_HOST_MAP_TREE)
 			INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC);
 	}
-	spin_unlock_irqrestore(&irq_big_lock, flags);
+	irq_radix_wrunlock(flags);
 
 	return 0;
 }
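To close the loop on the user-space sketch given before the diff, here is a tiny pthread driver that exercises it; again, every name is invented for illustration. Readers assert they never observe a torn update of a pair of counters the writer keeps equal. Paste it below the sketch and build with something like cc -std=c11 -pthread demo.c.

#include <assert.h>
#include <stdio.h>

static brlock_t lk = { .big_lock = PTHREAD_MUTEX_INITIALIZER };
static int shared_a, shared_b;		/* the writer keeps these equal */

static void *reader_fn(void *arg)
{
	int self = (int)(long)arg;

	for (int i = 0; i < 100000; i++) {
		br_read_lock(&lk, self);
		assert(shared_a == shared_b);	/* never torn under the read lock */
		br_read_unlock(&lk, self);
	}
	return NULL;
}

static void *writer_fn(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000; i++) {
		br_write_lock(&lk);
		shared_a++;		/* all readers drained: plain stores are safe */
		shared_b++;
		br_write_unlock(&lk);
	}
	return NULL;
}

int main(void)
{
	pthread_t r[NREADERS], w;

	for (long i = 0; i < NREADERS; i++)
		pthread_create(&r[i], NULL, reader_fn, (void *)i);
	pthread_create(&w, NULL, writer_fn, NULL);
	for (int i = 0; i < NREADERS; i++)
		pthread_join(r[i], NULL);
	pthread_join(w, NULL);
	printf("ok: a=%d b=%d\n", shared_a, shared_b);
	return 0;
}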