author	Anoop P A <anoop.pa@gmail.com>	2011-01-25 03:20:10 -0500
committer	Ralf Baechle <ralf@linux-mips.org>	2011-03-25 13:45:14 -0400
commit	92592c9ccac9ab9c652533e08d0daad06f1dc501 (patch)
tree	de70ba77bb97f0b9444beb9a8bb4b0d6fc60b3da /arch/mips
parent	3b042d0830463056a669a12362c940a94f7e3cd7 (diff)
MIPS: MSP71xx: Add vectored interrupt support.
This patch adds the vectored interrupt setup required for the MIPS MT modes. irq_cic has been restructured, with the PER interrupt handler moved out into its own file, and rewritten to support the MIPS MT modes (VSMP / SMTC).

[Ralf: fixed some more checkpatch warnings.]

Signed-off-by: Anoop P A <anoop.pa@gmail.com>
To: linux-mips@linux-mips.org
To: linux-kernel@vger.kernel.org
To: dhowells@redhat.com
Patchwork: https://patchwork.linux-mips.org/patch/2041/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
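For orientation before the diffs: the core of the change is that each second-level interrupt source gets its own small dispatch stub, registered as a dedicated CPU interrupt vector when the core supports vectored interrupts (cpu_has_vint), instead of being decoded from the cause register by the shared dispatcher. The fragment below is an illustrative sketch only, not part of the patch; the function name msp_vectored_irq_sketch is made up for illustration, and it assumes the MSP_INT_* numbers and the do_IRQ()/set_vi_handler()/cpu_has_vint declarations that msp_irq.c already pulls in.

/*
 * Illustrative sketch only -- a condensed restatement of what the patch
 * does in arch_init_irq(). It relies on the headers already included by
 * msp_irq.c (for MSP_INT_*, do_IRQ(), set_vi_handler(), cpu_has_vint).
 */

/* one stub per source: hand the line straight to the generic IRQ core */
static void usb_int_dispatch(void)
{
	do_IRQ(MSP_INT_USB);
}

static void __init msp_vectored_irq_sketch(void)
{
	/* the MT configurations require vectored interrupt mode */
	BUG_ON(!cpu_has_vint);

	/* route the USB cause bit directly to its own handler */
	set_vi_handler(MSP_INT_USB, usb_int_dispatch);
}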
Diffstat (limited to 'arch/mips')
-rw-r--r--	arch/mips/pmc-sierra/msp71xx/Makefile	2
-rw-r--r--	arch/mips/pmc-sierra/msp71xx/msp_irq.c	56
-rw-r--r--	arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c	248
-rw-r--r--	arch/mips/pmc-sierra/msp71xx/msp_irq_per.c	179
4 files changed, 397 insertions, 88 deletions
diff --git a/arch/mips/pmc-sierra/msp71xx/Makefile b/arch/mips/pmc-sierra/msp71xx/Makefile
index e107f79b1491..b25f3542e6d1 100644
--- a/arch/mips/pmc-sierra/msp71xx/Makefile
+++ b/arch/mips/pmc-sierra/msp71xx/Makefile
@@ -6,7 +6,7 @@ obj-y += msp_prom.o msp_setup.o msp_irq.o \
 obj-$(CONFIG_HAVE_GPIO_LIB) += gpio.o gpio_extended.o
 obj-$(CONFIG_PMC_MSP7120_GW) += msp_hwbutton.o
 obj-$(CONFIG_IRQ_MSP_SLP) += msp_irq_slp.o
-obj-$(CONFIG_IRQ_MSP_CIC) += msp_irq_cic.o
+obj-$(CONFIG_IRQ_MSP_CIC) += msp_irq_cic.o msp_irq_per.o
 obj-$(CONFIG_PCI) += msp_pci.o
 obj-$(CONFIG_MSPETH) += msp_eth.o
 obj-$(CONFIG_USB_MSP71XX) += msp_usb.o
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq.c b/arch/mips/pmc-sierra/msp71xx/msp_irq.c
index 734d598a2e3a..4531c4a514bc 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_irq.c
+++ b/arch/mips/pmc-sierra/msp71xx/msp_irq.c
@@ -19,8 +19,6 @@
 
 #include <msp_int.h>
 
-extern void msp_int_handle(void);
-
 /* SLP bases systems */
 extern void msp_slp_irq_init(void);
 extern void msp_slp_irq_dispatch(void);
@@ -29,6 +27,18 @@ extern void msp_slp_irq_dispatch(void);
 extern void msp_cic_irq_init(void);
 extern void msp_cic_irq_dispatch(void);
 
+/* VSMP support init */
+extern void msp_vsmp_int_init(void);
+
+/* vectored interrupt implementation */
+
+/* SW0/1 interrupts are used for SMP/SMTC */
+static inline void mac0_int_dispatch(void) { do_IRQ(MSP_INT_MAC0); }
+static inline void mac1_int_dispatch(void) { do_IRQ(MSP_INT_MAC1); }
+static inline void mac2_int_dispatch(void) { do_IRQ(MSP_INT_SAR); }
+static inline void usb_int_dispatch(void) { do_IRQ(MSP_INT_USB); }
+static inline void sec_int_dispatch(void) { do_IRQ(MSP_INT_SEC); }
+
 /*
  * The PMC-Sierra MSP interrupts are arranged in a 3 level cascaded
  * hierarchical system. The first level are the direct MIPS interrupts
@@ -96,29 +106,57 @@ asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
 		do_IRQ(MSP_INT_SW1);
 }
 
-static struct irqaction cascade_msp = {
+static struct irqaction cic_cascade_msp = {
 	.handler = no_action,
-	.name = "MSP cascade"
+	.name = "MSP CIC cascade"
 };
 
+static struct irqaction per_cascade_msp = {
+	.handler = no_action,
+	.name = "MSP PER cascade"
+};
 
 void __init arch_init_irq(void)
 {
+	/* assume we'll be using vectored interrupt mode except in UP mode*/
+#ifdef CONFIG_MIPS_MT
+	BUG_ON(!cpu_has_vint);
+#endif
 	/* initialize the 1st-level CPU based interrupt controller */
 	mips_cpu_irq_init();
 
 #ifdef CONFIG_IRQ_MSP_CIC
 	msp_cic_irq_init();
-
+#ifdef CONFIG_MIPS_MT
+	set_vi_handler(MSP_INT_CIC, msp_cic_irq_dispatch);
+	set_vi_handler(MSP_INT_MAC0, mac0_int_dispatch);
+	set_vi_handler(MSP_INT_MAC1, mac1_int_dispatch);
+	set_vi_handler(MSP_INT_SAR, mac2_int_dispatch);
+	set_vi_handler(MSP_INT_USB, usb_int_dispatch);
+	set_vi_handler(MSP_INT_SEC, sec_int_dispatch);
+#ifdef CONFIG_MIPS_MT_SMP
+	msp_vsmp_int_init();
+#elif defined CONFIG_MIPS_MT_SMTC
+	/*Set hwmask for all platform devices */
+	irq_hwmask[MSP_INT_MAC0] = C_IRQ0;
+	irq_hwmask[MSP_INT_MAC1] = C_IRQ1;
+	irq_hwmask[MSP_INT_USB] = C_IRQ2;
+	irq_hwmask[MSP_INT_SAR] = C_IRQ3;
+	irq_hwmask[MSP_INT_SEC] = C_IRQ5;
+
+#endif	/* CONFIG_MIPS_MT_SMP */
+#endif	/* CONFIG_MIPS_MT */
 	/* setup the cascaded interrupts */
-	setup_irq(MSP_INT_CIC, &cascade_msp);
-	setup_irq(MSP_INT_PER, &cascade_msp);
+	setup_irq(MSP_INT_CIC, &cic_cascade_msp);
+	setup_irq(MSP_INT_PER, &per_cascade_msp);
+
 #else
 	/* setup the 2nd-level SLP register based interrupt controller */
+	/* VSMP /SMTC support support is not enabled for SLP */
 	msp_slp_irq_init();
 
 	/* setup the cascaded SLP/PER interrupts */
-	setup_irq(MSP_INT_SLP, &cascade_msp);
-	setup_irq(MSP_INT_PER, &cascade_msp);
+	setup_irq(MSP_INT_SLP, &cic_cascade_msp);
+	setup_irq(MSP_INT_PER, &per_cascade_msp);
 #endif
 }
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c b/arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c
index 07e71ff2433f..e64458a833e2 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c
+++ b/arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c
@@ -1,8 +1,7 @@
 /*
- * This file define the irq handler for MSP SLM subsystem interrupts.
+ * Copyright 2010 PMC-Sierra, Inc, derived from irq_cpu.c
  *
- * Copyright 2005-2007 PMC-Sierra, Inc, derived from irq_cpu.c
- * Author: Andrew Hughes, Andrew_Hughes@pmc-sierra.com
+ * This file define the irq handler for MSP CIC subsystem interrupts.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -16,119 +15,212 @@
 #include <linux/bitops.h>
 #include <linux/irq.h>
 
+#include <asm/mipsregs.h>
 #include <asm/system.h>
 
 #include <msp_cic_int.h>
 #include <msp_regs.h>
 
 /*
- * NOTE: We are only enabling support for VPE0 right now.
+ * External API
  */
+extern void msp_per_irq_init(void);
+extern void msp_per_irq_dispatch(void);
 
-static inline void unmask_msp_cic_irq(unsigned int irq)
+
+/*
+ * Convenience Macro. Should be somewhere generic.
+ */
+#define get_current_vpe() \
+	((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)
+
+#ifdef CONFIG_SMP
+
+#define LOCK_VPE(flags, mtflags) \
+do { \
+	local_irq_save(flags); \
+	mtflags = dmt(); \
+} while (0)
+
+#define UNLOCK_VPE(flags, mtflags) \
+do { \
+	emt(mtflags); \
+	local_irq_restore(flags);\
+} while (0)
+
+#define LOCK_CORE(flags, mtflags) \
+do { \
+	local_irq_save(flags); \
+	mtflags = dvpe(); \
+} while (0)
+
+#define UNLOCK_CORE(flags, mtflags) \
+do { \
+	evpe(mtflags); \
+	local_irq_restore(flags);\
+} while (0)
+
+#else
+
+#define LOCK_VPE(flags, mtflags)
+#define UNLOCK_VPE(flags, mtflags)
+#endif
+
+/* ensure writes to cic are completed */
+static inline void cic_wmb(void)
 {
+	const volatile void __iomem *cic_mem = CIC_VPE0_MSK_REG;
+	volatile u32 dummy_read;
 
-	/* check for PER interrupt range */
-	if (irq < MSP_PER_INTBASE)
-		*CIC_VPE0_MSK_REG |= (1 << (irq - MSP_CIC_INTBASE));
-	else
-		*PER_INT_MSK_REG |= (1 << (irq - MSP_PER_INTBASE));
+	wmb();
+	dummy_read = __raw_readl(cic_mem);
+	dummy_read++;
 }
 
-static inline void mask_msp_cic_irq(unsigned int irq)
+static inline void unmask_cic_irq(unsigned int irq)
 {
-	/* check for PER interrupt range */
-	if (irq < MSP_PER_INTBASE)
-		*CIC_VPE0_MSK_REG &= ~(1 << (irq - MSP_CIC_INTBASE));
-	else
-		*PER_INT_MSK_REG &= ~(1 << (irq - MSP_PER_INTBASE));
+	volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
+	int vpe;
+#ifdef CONFIG_SMP
+	unsigned int mtflags;
+	unsigned long flags;
+
+	/*
+	 * Make sure we have IRQ affinity. It may have changed while
+	 * we were processing the IRQ.
+	 */
+	if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity))
+		return;
+#endif
+
+	vpe = get_current_vpe();
+	LOCK_VPE(flags, mtflags);
+	cic_msk_reg[vpe] |= (1 << (irq - MSP_CIC_INTBASE));
+	UNLOCK_VPE(flags, mtflags);
+	cic_wmb();
 }
 
-/*
- * While we ack the interrupt interrupts are disabled and thus we don't need
- * to deal with concurrency issues. Same for msp_cic_irq_end.
- */
-static inline void ack_msp_cic_irq(unsigned int irq)
+static inline void mask_cic_irq(unsigned int irq)
 {
-	mask_msp_cic_irq(irq);
-
+	volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
+	int vpe = get_current_vpe();
+#ifdef CONFIG_SMP
+	unsigned long flags, mtflags;
+#endif
+	LOCK_VPE(flags, mtflags);
+	cic_msk_reg[vpe] &= ~(1 << (irq - MSP_CIC_INTBASE));
+	UNLOCK_VPE(flags, mtflags);
+	cic_wmb();
+}
+static inline void msp_cic_irq_ack(unsigned int irq)
+{
+	mask_cic_irq(irq);
 	/*
-	 * only really necessary for 18, 16-14 and sometimes 3:0 (since
-	 * these can be edge sensitive) but it doesn't hurt for the others.
-	 */
-
-	/* check for PER interrupt range */
-	if (irq < MSP_PER_INTBASE)
-		*CIC_STS_REG = (1 << (irq - MSP_CIC_INTBASE));
-	else
-		*PER_INT_STS_REG = (1 << (irq - MSP_PER_INTBASE));
+	* Only really necessary for 18, 16-14 and sometimes 3:0
+	* (since these can be edge sensitive) but it doesn't
+	* hurt for the others
+	*/
+	*CIC_STS_REG = (1 << (irq - MSP_CIC_INTBASE));
+	smtc_im_ack_irq(irq);
+}
+
+static void msp_cic_irq_end(unsigned int irq)
+{
+	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
+		unmask_cic_irq(irq);
+}
+
+/*Note: Limiting to VSMP . Not tested in SMTC */
+
+#ifdef CONFIG_MIPS_MT_SMP
+static inline int msp_cic_irq_set_affinity(unsigned int irq,
+					const struct cpumask *cpumask)
+{
+	int cpu;
+	unsigned long flags;
+	unsigned int mtflags;
+	unsigned long imask = (1 << (irq - MSP_CIC_INTBASE));
+	volatile u32 *cic_mask = (volatile u32 *)CIC_VPE0_MSK_REG;
+
+	/* timer balancing should be disabled in kernel code */
+	BUG_ON(irq == MSP_INT_VPE0_TIMER || irq == MSP_INT_VPE1_TIMER);
+
+	LOCK_CORE(flags, mtflags);
+	/* enable if any of each VPE's TCs require this IRQ */
+	for_each_online_cpu(cpu) {
+		if (cpumask_test_cpu(cpu, cpumask))
+			cic_mask[cpu] |= imask;
+		else
+			cic_mask[cpu] &= ~imask;
+
+	}
+
+	UNLOCK_CORE(flags, mtflags);
+	return 0;
+
 }
+#endif
 
 static struct irq_chip msp_cic_irq_controller = {
 	.name = "MSP_CIC",
-	.ack = ack_msp_cic_irq,
-	.mask = ack_msp_cic_irq,
-	.mask_ack = ack_msp_cic_irq,
-	.unmask = unmask_msp_cic_irq,
+	.mask = mask_cic_irq,
+	.mask_ack = msp_cic_irq_ack,
+	.unmask = unmask_cic_irq,
+	.ack = msp_cic_irq_ack,
+	.end = msp_cic_irq_end,
+#ifdef CONFIG_MIPS_MT_SMP
+	.set_affinity = msp_cic_irq_set_affinity,
+#endif
 };
 
-
 void __init msp_cic_irq_init(void)
 {
 	int i;
-
 	/* Mask/clear interrupts. */
 	*CIC_VPE0_MSK_REG = 0x00000000;
-	*PER_INT_MSK_REG = 0x00000000;
+	*CIC_VPE1_MSK_REG = 0x00000000;
 	*CIC_STS_REG = 0xFFFFFFFF;
-	*PER_INT_STS_REG = 0xFFFFFFFF;
-
-#if defined(CONFIG_PMC_MSP7120_GW) || \
-    defined(CONFIG_PMC_MSP7120_EVAL)
 	/*
 	 * The MSP7120 RG and EVBD boards use IRQ[6:4] for PCI.
 	 * These inputs map to EXT_INT_POL[6:4] inside the CIC.
 	 * They are to be active low, level sensitive.
 	 */
 	*CIC_EXT_CFG_REG &= 0xFFFF8F8F;
-#endif
 
 	/* initialize all the IRQ descriptors */
-	for (i = MSP_CIC_INTBASE; i < MSP_PER_INTBASE + 32; i++)
+	for (i = MSP_CIC_INTBASE ; i < MSP_CIC_INTBASE + 32 ; i++) {
 		set_irq_chip_and_handler(i, &msp_cic_irq_controller,
 					 handle_level_irq);
+#ifdef CONFIG_MIPS_MT_SMTC
+		/* Mask of CIC interrupt */
+		irq_hwmask[i] = C_IRQ4;
+#endif
+	}
+
+	/* Initialize the PER interrupt sub-system */
+	msp_per_irq_init();
 }
 
+/* CIC masked by CIC vector processing before dispatch called */
 void msp_cic_irq_dispatch(void)
 {
-	u32 pending;
-	int intbase;
-
-	intbase = MSP_CIC_INTBASE;
-	pending = *CIC_STS_REG & *CIC_VPE0_MSK_REG;
-
-	/* check for PER interrupt */
-	if (pending == (1 << (MSP_INT_PER - MSP_CIC_INTBASE))) {
-		intbase = MSP_PER_INTBASE;
-		pending = *PER_INT_STS_REG & *PER_INT_MSK_REG;
-	}
-
-	/* check for spurious interrupt */
-	if (pending == 0x00000000) {
-		printk(KERN_ERR
-			"Spurious %s interrupt? status %08x, mask %08x\n",
-			(intbase == MSP_CIC_INTBASE) ? "CIC" : "PER",
-			(intbase == MSP_CIC_INTBASE) ?
-				*CIC_STS_REG : *PER_INT_STS_REG,
-			(intbase == MSP_CIC_INTBASE) ?
-				*CIC_VPE0_MSK_REG : *PER_INT_MSK_REG);
-		return;
-	}
-
-	/* check for the timer and dispatch it first */
-	if ((intbase == MSP_CIC_INTBASE) &&
-	    (pending & (1 << (MSP_INT_VPE0_TIMER - MSP_CIC_INTBASE))))
+	volatile u32 *cic_msk_reg = (volatile u32 *)CIC_VPE0_MSK_REG;
+	u32 cic_mask;
+	u32 pending;
+	int cic_status = *CIC_STS_REG;
+	cic_mask = cic_msk_reg[get_current_vpe()];
+	pending = cic_status & cic_mask;
+	if (pending & (1 << (MSP_INT_VPE0_TIMER - MSP_CIC_INTBASE))) {
 		do_IRQ(MSP_INT_VPE0_TIMER);
-	else
-		do_IRQ(ffs(pending) + intbase - 1);
+	} else if (pending & (1 << (MSP_INT_VPE1_TIMER - MSP_CIC_INTBASE))) {
+		do_IRQ(MSP_INT_VPE1_TIMER);
+	} else if (pending & (1 << (MSP_INT_PER - MSP_CIC_INTBASE))) {
+		msp_per_irq_dispatch();
+	} else if (pending) {
+		do_IRQ(ffs(pending) + MSP_CIC_INTBASE - 1);
+	} else{
+		spurious_interrupt();
+		/* Re-enable the CIC cascaded interrupt. */
+		irq_desc[MSP_INT_CIC].chip->end(MSP_INT_CIC);
+	}
 }
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c b/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c
new file mode 100644
index 000000000000..72bcd70d2ddf
--- /dev/null
+++ b/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2010 PMC-Sierra, Inc, derived from irq_cpu.c
+ *
+ * This file define the irq handler for MSP PER subsystem interrupts.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+
+#include <asm/mipsregs.h>
+#include <asm/system.h>
+
+#include <msp_cic_int.h>
+#include <msp_regs.h>
+
+
+/*
+ * Convenience Macro. Should be somewhere generic.
+ */
+#define get_current_vpe() \
+	((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)
+
+#ifdef CONFIG_SMP
+/*
+ * The PER registers must be protected from concurrent access.
+ */
+
+static DEFINE_SPINLOCK(per_lock);
+#endif
+
+/* ensure writes to per are completed */
+
+static inline void per_wmb(void)
+{
+	const volatile void __iomem *per_mem = PER_INT_MSK_REG;
+	volatile u32 dummy_read;
+
+	wmb();
+	dummy_read = __raw_readl(per_mem);
+	dummy_read++;
+}
+
+static inline void unmask_per_irq(unsigned int irq)
+{
+#ifdef CONFIG_SMP
+	unsigned long flags;
+	spin_lock_irqsave(&per_lock, flags);
+	*PER_INT_MSK_REG |= (1 << (irq - MSP_PER_INTBASE));
+	spin_unlock_irqrestore(&per_lock, flags);
+#else
+	*PER_INT_MSK_REG |= (1 << (irq - MSP_PER_INTBASE));
+#endif
+	per_wmb();
+}
+
+static inline void mask_per_irq(unsigned int irq)
+{
+#ifdef CONFIG_SMP
+	unsigned long flags;
+	spin_lock_irqsave(&per_lock, flags);
+	*PER_INT_MSK_REG &= ~(1 << (irq - MSP_PER_INTBASE));
+	spin_unlock_irqrestore(&per_lock, flags);
+#else
+	*PER_INT_MSK_REG &= ~(1 << (irq - MSP_PER_INTBASE));
+#endif
+	per_wmb();
+}
+
+static inline void msp_per_irq_enable(unsigned int irq)
+{
+	unmask_per_irq(irq);
+}
+
+static inline void msp_per_irq_disable(unsigned int irq)
+{
+	mask_per_irq(irq);
+}
+
+static unsigned int msp_per_irq_startup(unsigned int irq)
+{
+	msp_per_irq_enable(irq);
+	return 0;
+}
+
+#define msp_per_irq_shutdown	msp_per_irq_disable
+
+static inline void msp_per_irq_ack(unsigned int irq)
+{
+	mask_per_irq(irq);
+	/*
+	 * In the PER interrupt controller, only bits 11 and 10
+	 * are write-to-clear, (SPI TX complete, SPI RX complete).
+	 * It does nothing for any others.
+	 */
+
+	*PER_INT_STS_REG = (1 << (irq - MSP_PER_INTBASE));
+
+	/* Re-enable the CIC cascaded interrupt and return */
+	irq_desc[MSP_INT_CIC].chip->end(MSP_INT_CIC);
+}
+
+static void msp_per_irq_end(unsigned int irq)
+{
+	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
+		unmask_per_irq(irq);
+}
+
+#ifdef CONFIG_SMP
+static inline int msp_per_irq_set_affinity(unsigned int irq,
+				const struct cpumask *affinity)
+{
+	unsigned long flags;
+	/*
+	 * Calls to ack, end, startup, enable are spinlocked in setup_irq and
+	 * __do_IRQ.Callers of this function do not spinlock,so we need to
+	 * do so ourselves.
+	 */
+	raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
+	msp_per_irq_enable(irq);
+	raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+	return 0;
+
+}
+#endif
+
+static struct irq_chip msp_per_irq_controller = {
+	.name = "MSP_PER",
+	.startup = msp_per_irq_startup,
+	.shutdown = msp_per_irq_shutdown,
+	.enable = msp_per_irq_enable,
+	.disable = msp_per_irq_disable,
+#ifdef CONFIG_SMP
+	.set_affinity = msp_per_irq_set_affinity,
+#endif
+	.ack = msp_per_irq_ack,
+	.end = msp_per_irq_end,
+};
+
+void __init msp_per_irq_init(void)
+{
+	int i;
+	/* Mask/clear interrupts. */
+	*PER_INT_MSK_REG = 0x00000000;
+	*PER_INT_STS_REG = 0xFFFFFFFF;
+	/* initialize all the IRQ descriptors */
+	for (i = MSP_PER_INTBASE; i < MSP_PER_INTBASE + 32; i++) {
+		irq_desc[i].status = IRQ_DISABLED;
+		irq_desc[i].action = NULL;
+		irq_desc[i].depth = 1;
+		irq_desc[i].chip = &msp_per_irq_controller;
+#ifdef CONFIG_MIPS_MT_SMTC
+		irq_hwmask[i] = C_IRQ4;
+#endif
+	}
+}
+
+void msp_per_irq_dispatch(void)
+{
+	u32 per_mask = *PER_INT_MSK_REG;
+	u32 per_status = *PER_INT_STS_REG;
+	u32 pending;
+
+	pending = per_status & per_mask;
+	if (pending) {
+		do_IRQ(ffs(pending) + MSP_PER_INTBASE - 1);
+	} else {
+		spurious_interrupt();
+		/* Re-enable the CIC cascaded interrupt and return */
+		irq_desc[MSP_INT_CIC].chip->end(MSP_INT_CIC);
+	}
+}