author      David S. Miller <davem@sunset.davemloft.net>    2007-10-14 00:42:46 -0400
committer   David S. Miller <davem@sunset.davemloft.net>    2007-10-14 00:53:15 -0400
commit      eb2d8d60327bec172ec80efbda94d0c492088204 (patch)
tree        5c01deb8c251f8aa64cc3db2b95fd26f8ac285a6 /arch/sparc64/kernel/irq.c
parent      a650d3839e7a68321e5b76264398a63019b0928b (diff)
[SPARC64]: Access ivector_table[] using physical addresses.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64/kernel/irq.c')
-rw-r--r--   arch/sparc64/kernel/irq.c   56
1 file changed, 34 insertions, 22 deletions
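For orientation before the diff itself: the patch switches the ino_bucket worklist from kernel virtual pointers (irq_chain) to physical addresses (irq_chain_pa, plus the new ivector_table_pa), so handler_irq() can walk the per-cpu list with ldxa/lduwa loads through ASI_PHYS_USE_EC instead of dereferencing virtual pointers in trap context. Below is a minimal, hypothetical user-space model of that list walk, offered only as a sketch; the struct layout, the FAKE_KERN_BASE offset, and the helper names are illustrative and do not exist in the kernel.

/*
 * Hypothetical stand-alone model of the physical-address list walk that
 * this patch introduces in handler_irq().  The real code reads the fields
 * with ldxa/lduwa through ASI_PHYS_USE_EC; here a constant offset fakes
 * the virtual -> physical translation so the sketch runs in user space.
 */
#include <stddef.h>
#include <stdio.h>

#define FAKE_KERN_BASE 0x1000UL           /* stand-in for kern_base - KERNBASE */

struct bucket {                           /* models struct ino_bucket */
        unsigned long chain_pa;           /* "physical" address of next bucket, 0 = end */
        unsigned int  virt_irq;           /* virtual irq number to dispatch */
};

/* Models kimage_addr_to_ra(): kernel-image virtual address -> real address. */
static unsigned long va_to_pa(void *p)
{
        return (unsigned long)p + FAKE_KERN_BASE;
}

/* Model the ldxa/lduwa physical loads: read a field via its "physical" address. */
static unsigned long load_phys_u64(unsigned long pa)
{
        return *(unsigned long *)(pa - FAKE_KERN_BASE);
}

static unsigned int load_phys_u32(unsigned long pa)
{
        return *(unsigned int *)(pa - FAKE_KERN_BASE);
}

int main(void)
{
        struct bucket table[3] = { { 0, 10 }, { 0, 11 }, { 0, 12 } };
        unsigned long pa;

        /* Chain the buckets 2 -> 1 -> 0 using "physical" addresses only. */
        table[1].chain_pa = va_to_pa(&table[0]);
        table[2].chain_pa = va_to_pa(&table[1]);

        /* Walk the chain the way handler_irq() does after this patch. */
        for (pa = va_to_pa(&table[2]); pa != 0; ) {
                unsigned long next_pa  = load_phys_u64(pa + offsetof(struct bucket, chain_pa));
                unsigned int  virt_irq = load_phys_u32(pa + offsetof(struct bucket, virt_irq));

                printf("dispatch virt_irq %u\n", virt_irq); /* stands in for __do_IRQ() */
                pa = next_pa;
        }
        return 0;
}

The real handler additionally clears irq_chain_pa with stxa %g0 as each bucket is consumed, which this model omits.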
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 4db4dd576210..26cdf47981c3 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -51,15 +51,12 @@
  * To make processing these packets efficient and race free we use
  * an array of irq buckets below. The interrupt vector handler in
  * entry.S feeds incoming packets into per-cpu pil-indexed lists.
- * The IVEC handler does not need to act atomically, the PIL dispatch
- * code uses CAS to get an atomic snapshot of the list and clear it
- * at the same time.
  *
  * If you make changes to ino_bucket, please update hand coded assembler
  * of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S
  */
 struct ino_bucket {
-/*0x00*/unsigned long irq_chain;
+/*0x00*/unsigned long irq_chain_pa;
 
         /* Virtual interrupt number assigned to this INO. */
 /*0x08*/unsigned int virt_irq;
@@ -68,20 +65,14 @@ struct ino_bucket {
 
 #define NUM_IVECS (IMAP_INR + 1)
 struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
+unsigned long ivector_table_pa;
 
 #define __irq_ino(irq) \
         (((struct ino_bucket *)(irq)) - &ivector_table[0])
 #define __bucket(irq) ((struct ino_bucket *)(irq))
 #define __irq(bucket) ((unsigned long)(bucket))
 
-/* This has to be in the main kernel image, it cannot be
- * turned into per-cpu data. The reason is that the main
- * kernel image is locked into the TLB and this structure
- * is accessed from the vectored interrupt trap handler. If
- * access to this structure takes a TLB miss it could cause
- * the 5-level sparc v9 trap stack to overflow.
- */
-#define irq_work(__cpu) &(trap_block[(__cpu)].irq_worklist)
+#define irq_work_pa(__cpu) &(trap_block[(__cpu)].irq_worklist_pa)
 
 static struct {
         unsigned long irq;
@@ -689,9 +680,8 @@ void ack_bad_irq(unsigned int virt_irq)
 
 void handler_irq(int irq, struct pt_regs *regs)
 {
-        struct ino_bucket *bucket;
+        unsigned long pstate, bucket_pa;
         struct pt_regs *old_regs;
-        unsigned long pstate;
 
         clear_softint(1 << irq);
 
@@ -704,18 +694,30 @@ void handler_irq(int irq, struct pt_regs *regs)
                                "ldx [%2], %1\n\t"
                                "stx %%g0, [%2]\n\t"
                                "wrpr %0, 0x0, %%pstate\n\t"
-                               : "=&r" (pstate), "=&r" (bucket)
-                               : "r" (irq_work(smp_processor_id())),
+                               : "=&r" (pstate), "=&r" (bucket_pa)
+                               : "r" (irq_work_pa(smp_processor_id())),
                                  "i" (PSTATE_IE)
                                : "memory");
 
-        while (bucket) {
-                struct ino_bucket *next = __bucket(bucket->irq_chain);
+        while (bucket_pa) {
+                unsigned long next_pa;
+                unsigned int virt_irq;
 
-                bucket->irq_chain = 0UL;
-                __do_IRQ(bucket->virt_irq);
+                __asm__ __volatile__("ldxa [%2] %4, %0\n\t"
+                                     "lduwa [%3] %4, %1\n\t"
+                                     "stxa %%g0, [%2] %4"
+                                     : "=&r" (next_pa), "=&r" (virt_irq)
+                                     : "r" (bucket_pa +
+                                            offsetof(struct ino_bucket,
+                                                     irq_chain_pa)),
+                                       "r" (bucket_pa +
+                                            offsetof(struct ino_bucket,
+                                                     virt_irq)),
+                                       "i" (ASI_PHYS_USE_EC));
 
-                bucket = next;
+                __do_IRQ(virt_irq);
+
+                bucket_pa = next_pa;
         }
 
         irq_exit();
@@ -815,7 +817,7 @@ void init_irqwork_curcpu(void)
 {
         int cpu = hard_smp_processor_id();
 
-        trap_block[cpu].irq_worklist = 0UL;
+        trap_block[cpu].irq_worklist_pa = 0UL;
 }
 
 /* Please be very careful with register_one_mondo() and
@@ -926,6 +928,14 @@ static struct irqaction timer_irq_action = {
         .name = "timer",
 };
 
+/* XXX Belongs in a common location. XXX */
+static unsigned long kimage_addr_to_ra(void *p)
+{
+        unsigned long val = (unsigned long) p;
+
+        return kern_base + (val - KERNBASE);
+}
+
 /* Only invoked on boot processor. */
 void __init init_IRQ(void)
 {
@@ -933,6 +943,8 @@ void __init init_IRQ(void)
         kill_prom_timer();
         memset(&ivector_table[0], 0, sizeof(ivector_table));
 
+        ivector_table_pa = kimage_addr_to_ra(&ivector_table[0]);
+
         if (tlb_type == hypervisor)
                 sun4v_init_mondo_queues();
 