about summary refs log tree commit diff stats
path: root/arch/sparc64/kernel
diff options
context:
space:
mode:
authorDavid S. Miller <davem@sunset.davemloft.net>2006-02-13 21:22:57 -0500
committerDavid S. Miller <davem@sunset.davemloft.net>2006-03-20 04:12:41 -0500
commit10951ee61056a9f91c00c16746f2042672d7af7c (patch)
treead95c289332c5a3ad82b262a736620ef8f16b406 /arch/sparc64/kernel
parente3999574b48125c9bb0c95e3e9f1c696bf96c3e3 (diff)
[SPARC64]: Program IRQ registers correctly on sun4v.
Need to use hypervisor calls instead of direct register accesses. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64/kernel')
-rw-r--r--arch/sparc64/kernel/irq.c71
1 file changed, 47 insertions(+), 24 deletions(-)
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index bcc889a53324..735b3abb85e1 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -152,7 +152,10 @@ void enable_irq(unsigned int irq)
152 preempt_disable(); 152 preempt_disable();
153 153
154 if (tlb_type == hypervisor) { 154 if (tlb_type == hypervisor) {
155 /* XXX SUN4V: implement me... XXX */ 155 int cpu = hard_smp_processor_id();
156
157 sun4v_intr_settarget(irq, cpu);
158 sun4v_intr_setenabled(irq, HV_INTR_ENABLED);
156 } else { 159 } else {
157 if (tlb_type == cheetah || tlb_type == cheetah_plus) { 160 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
158 unsigned long ver; 161 unsigned long ver;
@@ -210,16 +213,20 @@ void disable_irq(unsigned int irq)
210 213
211 imap = bucket->imap; 214 imap = bucket->imap;
212 if (imap != 0UL) { 215 if (imap != 0UL) {
213 u32 tmp; 216 if (tlb_type == hypervisor) {
217 sun4v_intr_setenabled(irq, HV_INTR_DISABLED);
218 } else {
219 u32 tmp;
214 220
215 /* NOTE: We do not want to futz with the IRQ clear registers 221 /* NOTE: We do not want to futz with the IRQ clear registers
216 * and move the state to IDLE, the SCSI code does call 222 * and move the state to IDLE, the SCSI code does call
217 * disable_irq() to assure atomicity in the queue cmd 223 * disable_irq() to assure atomicity in the queue cmd
218 * SCSI adapter driver code. Thus we'd lose interrupts. 224 * SCSI adapter driver code. Thus we'd lose interrupts.
219 */ 225 */
220 tmp = upa_readl(imap); 226 tmp = upa_readl(imap);
221 tmp &= ~IMAP_VALID; 227 tmp &= ~IMAP_VALID;
222 upa_writel(tmp, imap); 228 upa_writel(tmp, imap);
229 }
223 } 230 }
224} 231}
225 232
@@ -257,6 +264,8 @@ unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long
257 return __irq(&pil0_dummy_bucket); 264 return __irq(&pil0_dummy_bucket);
258 } 265 }
259 266
267 BUG_ON(tlb_type == hypervisor);
268
260 /* RULE: Both must be specified in all other cases. */ 269 /* RULE: Both must be specified in all other cases. */
261 if (iclr == 0UL || imap == 0UL) { 270 if (iclr == 0UL || imap == 0UL) {
262 prom_printf("Invalid build_irq %d %d %016lx %016lx\n", 271 prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
@@ -633,10 +642,16 @@ static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs)
633 break; 642 break;
634 } 643 }
635 if (bp->pil != 0) { 644 if (bp->pil != 0) {
636 upa_writel(ICLR_IDLE, bp->iclr); 645 if (tlb_type == hypervisor) {
637 /* Test and add entropy */ 646 unsigned int irq = __irq(bp);
638 if (random & SA_SAMPLE_RANDOM) 647
639 add_interrupt_randomness(irq); 648 sun4v_intr_setstate(irq, HV_INTR_STATE_IDLE);
649 } else {
650 upa_writel(ICLR_IDLE, bp->iclr);
651 /* Test and add entropy */
652 if (random & SA_SAMPLE_RANDOM)
653 add_interrupt_randomness(irq);
654 }
640 } 655 }
641out: 656out:
642 bp->flags &= ~IBF_INPROGRESS; 657 bp->flags &= ~IBF_INPROGRESS;
@@ -769,24 +784,32 @@ static int retarget_one_irq(struct irqaction *p, int goal_cpu)
769{ 784{
770 struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table; 785 struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table;
771 unsigned long imap = bucket->imap; 786 unsigned long imap = bucket->imap;
772 unsigned int tid;
773 787
774 while (!cpu_online(goal_cpu)) { 788 while (!cpu_online(goal_cpu)) {
775 if (++goal_cpu >= NR_CPUS) 789 if (++goal_cpu >= NR_CPUS)
776 goal_cpu = 0; 790 goal_cpu = 0;
777 } 791 }
778 792
779 if (tlb_type == cheetah || tlb_type == cheetah_plus) { 793 if (tlb_type == hypervisor) {
780 tid = goal_cpu << 26; 794 unsigned int irq = __irq(bucket);
781 tid &= IMAP_AID_SAFARI; 795
782 } else if (this_is_starfire == 0) { 796 sun4v_intr_settarget(irq, goal_cpu);
783 tid = goal_cpu << 26; 797 sun4v_intr_setenabled(irq, HV_INTR_ENABLED);
784 tid &= IMAP_TID_UPA;
785 } else { 798 } else {
786 tid = (starfire_translate(imap, goal_cpu) << 26); 799 unsigned int tid;
787 tid &= IMAP_TID_UPA; 800
801 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
802 tid = goal_cpu << 26;
803 tid &= IMAP_AID_SAFARI;
804 } else if (this_is_starfire == 0) {
805 tid = goal_cpu << 26;
806 tid &= IMAP_TID_UPA;
807 } else {
808 tid = (starfire_translate(imap, goal_cpu) << 26);
809 tid &= IMAP_TID_UPA;
810 }
811 upa_writel(tid | IMAP_VALID, imap);
788 } 812 }
789 upa_writel(tid | IMAP_VALID, imap);
790 813
791 do { 814 do {
792 if (++goal_cpu >= NR_CPUS) 815 if (++goal_cpu >= NR_CPUS)