Diffstat (limited to 'arch/sparc64')
-rw-r--r--  arch/sparc64/kernel/irq.c  122
-rw-r--r--  arch/sparc64/mm/init.c       2
2 files changed, 91 insertions, 33 deletions
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 4e9537c96778..dc51bdf853cd 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -42,6 +42,7 @@
 #include <asm/auxio.h>
 #include <asm/head.h>
 #include <asm/hypervisor.h>
+#include <asm/cacheflush.h>
 
 /* UPA nodes send interrupt packet to UltraSparc with first data reg
  * value low 5 (7 on Starfire) bits holding the IRQ identifier being
@@ -56,10 +57,10 @@
  * of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S
  */
 struct ino_bucket {
-/*0x00*/unsigned long irq_chain_pa;
+/*0x00*/unsigned long __irq_chain_pa;
 
 	/* Virtual interrupt number assigned to this INO. */
-/*0x08*/unsigned int virt_irq;
+/*0x08*/unsigned int __virt_irq;
 /*0x0c*/unsigned int __pad;
 };
 
@@ -67,6 +68,60 @@ struct ino_bucket {
 struct ino_bucket *ivector_table;
 unsigned long ivector_table_pa;
 
+/* On several sun4u processors, it is illegal to mix bypass and
+ * non-bypass accesses.  Therefore we access all INO buckets
+ * using bypass accesses only.
+ */
+static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
+{
+	unsigned long ret;
+
+	__asm__ __volatile__("ldxa	[%1] %2, %0"
+			     : "=&r" (ret)
+			     : "r" (bucket_pa +
+				    offsetof(struct ino_bucket,
+					     __irq_chain_pa)),
+			       "i" (ASI_PHYS_USE_EC));
+
+	return ret;
+}
+
+static void bucket_clear_chain_pa(unsigned long bucket_pa)
+{
+	__asm__ __volatile__("stxa	%%g0, [%0] %1"
+			     : /* no outputs */
+			     : "r" (bucket_pa +
+				    offsetof(struct ino_bucket,
+					     __irq_chain_pa)),
+			       "i" (ASI_PHYS_USE_EC));
+}
+
+static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
+{
+	unsigned int ret;
+
+	__asm__ __volatile__("lduwa	[%1] %2, %0"
+			     : "=&r" (ret)
+			     : "r" (bucket_pa +
+				    offsetof(struct ino_bucket,
+					     __virt_irq)),
+			       "i" (ASI_PHYS_USE_EC));
+
+	return ret;
+}
+
+static void bucket_set_virt_irq(unsigned long bucket_pa,
+				unsigned int virt_irq)
+{
+	__asm__ __volatile__("stwa	%0, [%1] %2"
+			     : /* no outputs */
+			     : "r" (virt_irq),
+			       "r" (bucket_pa +
+				    offsetof(struct ino_bucket,
+					     __virt_irq)),
+			       "i" (ASI_PHYS_USE_EC));
+}
+
 #define __irq_ino(irq) \
 	(((struct ino_bucket *)(irq)) - &ivector_table[0])
 #define __bucket(irq) ((struct ino_bucket *)(irq))
@@ -569,18 +624,21 @@ unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
 {
 	struct ino_bucket *bucket;
 	struct irq_handler_data *data;
+	unsigned int virt_irq;
 	int ino;
 
 	BUG_ON(tlb_type == hypervisor);
 
 	ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
 	bucket = &ivector_table[ino];
-	if (!bucket->virt_irq) {
-		bucket->virt_irq = virt_irq_alloc(__irq(bucket));
-		set_irq_chip(bucket->virt_irq, &sun4u_irq);
+	virt_irq = bucket_get_virt_irq(__pa(bucket));
+	if (!virt_irq) {
+		virt_irq = virt_irq_alloc(__irq(bucket));
+		bucket_set_virt_irq(__pa(bucket), virt_irq);
+		set_irq_chip(virt_irq, &sun4u_irq);
 	}
 
-	data = get_irq_chip_data(bucket->virt_irq);
+	data = get_irq_chip_data(virt_irq);
 	if (unlikely(data))
 		goto out;
 
@@ -589,13 +647,13 @@ unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
 		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
 		prom_halt();
 	}
-	set_irq_chip_data(bucket->virt_irq, data);
+	set_irq_chip_data(virt_irq, data);
 
 	data->imap = imap;
 	data->iclr = iclr;
 
 out:
-	return bucket->virt_irq;
+	return virt_irq;
 }
 
 static unsigned int sun4v_build_common(unsigned long sysino,
@@ -603,16 +661,19 @@ static unsigned int sun4v_build_common(unsigned long sysino,
 {
 	struct ino_bucket *bucket;
 	struct irq_handler_data *data;
+	unsigned int virt_irq;
 
 	BUG_ON(tlb_type != hypervisor);
 
 	bucket = &ivector_table[sysino];
-	if (!bucket->virt_irq) {
-		bucket->virt_irq = virt_irq_alloc(__irq(bucket));
-		set_irq_chip(bucket->virt_irq, chip);
+	virt_irq = bucket_get_virt_irq(__pa(bucket));
+	if (!virt_irq) {
+		virt_irq = virt_irq_alloc(__irq(bucket));
+		bucket_set_virt_irq(__pa(bucket), virt_irq);
+		set_irq_chip(virt_irq, chip);
 	}
 
-	data = get_irq_chip_data(bucket->virt_irq);
+	data = get_irq_chip_data(virt_irq);
 	if (unlikely(data))
 		goto out;
 
@@ -621,7 +682,7 @@ static unsigned int sun4v_build_common(unsigned long sysino,
 		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
 		prom_halt();
 	}
-	set_irq_chip_data(bucket->virt_irq, data);
+	set_irq_chip_data(virt_irq, data);
 
 	/* Catch accidental accesses to these things.  IMAP/ICLR handling
 	 * is done by hypervisor calls on sun4v platforms, not by direct
@@ -631,7 +692,7 @@ static unsigned int sun4v_build_common(unsigned long sysino,
 	data->iclr = ~0UL;
 
 out:
-	return bucket->virt_irq;
+	return virt_irq;
 }
 
 unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
@@ -646,19 +707,24 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
 	struct irq_handler_data *data;
 	struct ino_bucket *bucket;
 	unsigned long hv_err, cookie;
+	unsigned int virt_irq;
 
 	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
 	if (unlikely(!bucket))
 		return 0;
+	__flush_dcache_range((unsigned long) bucket,
+			     ((unsigned long) bucket +
+			      sizeof(struct ino_bucket)));
 
-	bucket->virt_irq = virt_irq_alloc(__irq(bucket));
-	set_irq_chip(bucket->virt_irq, &sun4v_virq);
+	virt_irq = virt_irq_alloc(__irq(bucket));
+	bucket_set_virt_irq(__pa(bucket), virt_irq);
+	set_irq_chip(virt_irq, &sun4v_virq);
 
 	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
 	if (unlikely(!data))
 		return 0;
 
-	set_irq_chip_data(bucket->virt_irq, data);
+	set_irq_chip_data(virt_irq, data);
 
 	/* Catch accidental accesses to these things.  IMAP/ICLR handling
 	 * is done by hypervisor calls on sun4v platforms, not by direct
@@ -675,10 +741,10 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
 		prom_halt();
 	}
 
-	virt_to_real_irq_table[bucket->virt_irq].dev_handle = devhandle;
-	virt_to_real_irq_table[bucket->virt_irq].dev_ino = devino;
+	virt_to_real_irq_table[virt_irq].dev_handle = devhandle;
+	virt_to_real_irq_table[virt_irq].dev_ino = devino;
 
-	return bucket->virt_irq;
+	return virt_irq;
 }
 
 void ack_bad_irq(unsigned int virt_irq)
@@ -718,17 +784,9 @@ void handler_irq(int irq, struct pt_regs *regs)
 		unsigned long next_pa;
 		unsigned int virt_irq;
 
-		__asm__ __volatile__("ldxa	[%2] %4, %0\n\t"
-				     "lduwa	[%3] %4, %1\n\t"
-				     "stxa	%%g0, [%2] %4"
-				     : "=&r" (next_pa), "=&r" (virt_irq)
-				     : "r" (bucket_pa +
-					    offsetof(struct ino_bucket,
-						     irq_chain_pa)),
-				       "r" (bucket_pa +
-					    offsetof(struct ino_bucket,
-						     virt_irq)),
-				       "i" (ASI_PHYS_USE_EC));
+		next_pa = bucket_get_chain_pa(bucket_pa);
+		virt_irq = bucket_get_virt_irq(bucket_pa);
+		bucket_clear_chain_pa(bucket_pa);
 
 		__do_IRQ(virt_irq);
 
@@ -957,6 +1015,8 @@ void __init init_IRQ(void)
 		prom_printf("Fatal error, cannot allocate ivector_table\n");
 		prom_halt();
 	}
+	__flush_dcache_range((unsigned long) ivector_table,
+			     ((unsigned long) ivector_table) + size);
 
 	ivector_table_pa = __pa(ivector_table);
 
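
The four bucket_*() helpers added above exist so that every access to an ino_bucket goes through ASI_PHYS_USE_EC, in line with the new comment about never mixing bypass and non-bypass accesses. For orientation, the dispatch loop in handler_irq() reduces to roughly the sketch below after this patch; the surrounding while loop and the way bucket_pa is snapshotted from the per-cpu work list are paraphrased context, not part of the hunk above, so treat that framing as an approximation:

	/* Sketch: walk the pending ino_buckets by physical address.
	 * bucket_pa is assumed to have already been read (and cleared)
	 * from the per-cpu work list.
	 */
	while (bucket_pa) {
		unsigned long next_pa;
		unsigned int virt_irq;

		next_pa = bucket_get_chain_pa(bucket_pa);	/* ->__irq_chain_pa */
		virt_irq = bucket_get_virt_irq(bucket_pa);	/* ->__virt_irq */
		bucket_clear_chain_pa(bucket_pa);		/* unlink the bucket */

		__do_IRQ(virt_irq);

		bucket_pa = next_pa;
	}
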
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 3010227fe243..f0ab9aab308f 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -631,7 +631,6 @@ void prom_world(int enter)
 	__asm__ __volatile__("flushw");
 }
 
-#ifdef DCACHE_ALIASING_POSSIBLE
 void __flush_dcache_range(unsigned long start, unsigned long end)
 {
 	unsigned long va;
@@ -655,7 +654,6 @@ void __flush_dcache_range(unsigned long start, unsigned long end)
655 "i" (ASI_DCACHE_INVALIDATE)); 654 "i" (ASI_DCACHE_INVALIDATE));
656 } 655 }
657} 656}
658#endif /* DCACHE_ALIASING_POSSIBLE */
659 657
660/* get_new_mmu_context() uses "cache + 1". */ 658/* get_new_mmu_context() uses "cache + 1". */
661DEFINE_SPINLOCK(ctx_alloc_lock); 659DEFINE_SPINLOCK(ctx_alloc_lock);
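
The mm/init.c hunks above drop the DCACHE_ALIASING_POSSIBLE guard so that __flush_dcache_range() is always built on sparc64: after this patch irq.c calls it on every configuration, both for the statically allocated ivector_table in init_IRQ() and for each dynamically allocated bucket in sun4v_build_virq(). The pattern is to initialize the bucket through the normal cacheable kernel mapping, flush those lines out of the D-cache, and from then on touch the bucket only via the physical bypass helpers. A condensed sketch of the sun4v_build_virq() sequence (error handling and the later cookie/hypervisor setup omitted):

	/* Allocate and zero the bucket through the cacheable virtual mapping. */
	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
	if (unlikely(!bucket))
		return 0;

	/* Evict the bucket's lines from the D-cache so that from here on
	 * the bucket is only ever seen through bypass (physical) accesses.
	 */
	__flush_dcache_range((unsigned long) bucket,
			     (unsigned long) bucket + sizeof(struct ino_bucket));

	/* All further stores go through ASI_PHYS_USE_EC via the helpers. */
	virt_irq = virt_irq_alloc(__irq(bucket));
	bucket_set_virt_irq(__pa(bucket), virt_irq);
	set_irq_chip(virt_irq, &sun4v_virq);
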