aboutsummaryrefslogtreecommitdiffstats
path: root/arch/mips/pmcs-msp71xx/msp_irq_cic.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/mips/pmcs-msp71xx/msp_irq_cic.c')
-rw-r--r--arch/mips/pmcs-msp71xx/msp_irq_cic.c216
1 files changed, 216 insertions, 0 deletions
diff --git a/arch/mips/pmcs-msp71xx/msp_irq_cic.c b/arch/mips/pmcs-msp71xx/msp_irq_cic.c
new file mode 100644
index 000000000000..e49b499f66db
--- /dev/null
+++ b/arch/mips/pmcs-msp71xx/msp_irq_cic.c
@@ -0,0 +1,216 @@
1/*
2 * Copyright 2010 PMC-Sierra, Inc, derived from irq_cpu.c
3 *
 * This file defines the irq handler for MSP CIC subsystem interrupts.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#include <linux/init.h>
13#include <linux/interrupt.h>
14#include <linux/kernel.h>
15#include <linux/bitops.h>
16#include <linux/irq.h>
17
18#include <asm/mipsregs.h>
19
20#include <msp_cic_int.h>
21#include <msp_regs.h>
22
23/*
24 * External API
25 */
26extern void msp_per_irq_init(void);
27extern void msp_per_irq_dispatch(void);
28
29
/*
 * Convenience Macro. Should be somewhere generic.
 * Returns the index of the VPE this code is currently running on, read
 * from the CP0 TCBind register (MIPS MT ASE).
 */
#define get_current_vpe()   \
	((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)
35
#ifdef CONFIG_SMP

/*
 * LOCK_VPE/UNLOCK_VPE: disable local interrupts and multithreading on
 * this VPE (dmt) so the CIC mask read-modify-write in between is not
 * interleaved with the other TCs on the same VPE.
 */
#define LOCK_VPE(flags, mtflags) \
do { \
	local_irq_save(flags); \
	mtflags = dmt(); \
} while (0)

/* Restore multithreading (emt) and local interrupts saved by LOCK_VPE. */
#define UNLOCK_VPE(flags, mtflags) \
do { \
	emt(mtflags); \
	local_irq_restore(flags);\
} while (0)

/*
 * LOCK_CORE/UNLOCK_CORE: disable local interrupts and all VPEs on the
 * core (dvpe); used when updating every CPU's CIC mask at once (see
 * msp_cic_irq_set_affinity).
 */
#define LOCK_CORE(flags, mtflags) \
do { \
	local_irq_save(flags); \
	mtflags = dvpe(); \
} while (0)

/* Restore the other VPEs (evpe) and local interrupts saved by LOCK_CORE. */
#define UNLOCK_CORE(flags, mtflags) \
do { \
	evpe(mtflags); \
	local_irq_restore(flags);\
} while (0)

#else

/* UP build: no other TCs/VPEs to exclude, so locking is a no-op. */
#define LOCK_VPE(flags, mtflags)
#define UNLOCK_VPE(flags, mtflags)
#endif
67
/* ensure writes to cic are completed */
static inline void cic_wmb(void)
{
	const volatile void __iomem *cic_mem = CIC_VPE0_MSK_REG;
	volatile u32 dummy_read;

	/*
	 * wmb() orders the preceding register writes; the dummy read back
	 * from a CIC register then forces them out to the device before
	 * we return (classic write-posting flush).
	 */
	wmb();
	dummy_read = __raw_readl(cic_mem);
	dummy_read++; /* value is irrelevant; quiets "set but not used" */
}
78
/*
 * Unmask (enable) a CIC interrupt by setting its bit in the CIC mask
 * register of the VPE we are currently running on.
 */
static void unmask_cic_irq(struct irq_data *d)
{
	/* cic_msk_reg[] is indexed per VPE: [0] = VPE0 mask, [1] = VPE1 mask */
	volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
	int vpe;
#ifdef CONFIG_SMP
	unsigned int mtflags;
	unsigned long flags;

	/*
	 * Make sure we have IRQ affinity.  It may have changed while
	 * we were processing the IRQ.
	 */
	if (!cpumask_test_cpu(smp_processor_id(), d->affinity))
		return;
#endif

	vpe = get_current_vpe();
	/* RMW of the mask register must not race other TCs on this VPE */
	LOCK_VPE(flags, mtflags);
	cic_msk_reg[vpe] |= (1 << (d->irq - MSP_CIC_INTBASE));
	UNLOCK_VPE(flags, mtflags);
	cic_wmb();	/* flush the mask update out to the CIC */
}
101
/*
 * Mask (disable) a CIC interrupt by clearing its bit in the CIC mask
 * register of the VPE we are currently running on.
 */
static void mask_cic_irq(struct irq_data *d)
{
	/* cic_msk_reg[] is indexed per VPE: [0] = VPE0 mask, [1] = VPE1 mask */
	volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
	int vpe = get_current_vpe();
#ifdef CONFIG_SMP
	unsigned long flags, mtflags;
#endif
	/* RMW of the mask register must not race other TCs on this VPE */
	LOCK_VPE(flags, mtflags);
	cic_msk_reg[vpe] &= ~(1 << (d->irq - MSP_CIC_INTBASE));
	UNLOCK_VPE(flags, mtflags);
	cic_wmb();	/* flush the mask update out to the CIC */
}
/*
 * Acknowledge a CIC interrupt: mask it first, then clear its pending
 * bit in the CIC status register (write-one-to-clear, as used here).
 */
static void msp_cic_irq_ack(struct irq_data *d)
{
	mask_cic_irq(d);
	/*
	 * Only really necessary for 18, 16-14 and sometimes 3:0
	 * (since these can be edge sensitive) but it doesn't
	 * hurt for the others
	 */
	*CIC_STS_REG = (1 << (d->irq - MSP_CIC_INTBASE));
	smtc_im_ack_irq(d->irq);	/* SMTC interrupt-mask bookkeeping */
}
125
/* Note: limited to VSMP.  Not tested under SMTC. */

#ifdef CONFIG_MIPS_MT_SMP
/*
 * Route a CIC interrupt to the CPUs (VPEs) in @cpumask: for every
 * online CPU, set the IRQ's bit in that CPU's CIC mask register if the
 * CPU is in the mask, otherwise clear it.
 *
 * Fix: the IRQ number must come from d->irq; the previous code
 * referenced an undeclared identifier `irq' and could not compile.
 *
 * Returns 0 on success (the only outcome).
 */
static int msp_cic_irq_set_affinity(struct irq_data *d,
				const struct cpumask *cpumask, bool force)
{
	int cpu;
	unsigned long flags;
	unsigned int mtflags;
	unsigned long imask = (1 << (d->irq - MSP_CIC_INTBASE));
	volatile u32 *cic_mask = (volatile u32 *)CIC_VPE0_MSK_REG;

	/* timer balancing should be disabled in kernel code */
	BUG_ON(d->irq == MSP_INT_VPE0_TIMER || d->irq == MSP_INT_VPE1_TIMER);

	/* quiesce the whole core while rewriting every VPE's mask */
	LOCK_CORE(flags, mtflags);
	/* enable if any of each VPE's TCs require this IRQ */
	for_each_online_cpu(cpu) {
		if (cpumask_test_cpu(cpu, cpumask))
			cic_mask[cpu] |= imask;
		else
			cic_mask[cpu] &= ~imask;
	}

	UNLOCK_CORE(flags, mtflags);
	return 0;
}
#endif
156
/* irq_chip callbacks for CIC interrupts; registered in msp_cic_irq_init(). */
static struct irq_chip msp_cic_irq_controller = {
	.name = "MSP_CIC",
	.irq_mask = mask_cic_irq,
	.irq_mask_ack = msp_cic_irq_ack,	/* ack already masks, so one call does both */
	.irq_unmask = unmask_cic_irq,
	.irq_ack = msp_cic_irq_ack,
#ifdef CONFIG_MIPS_MT_SMP
	.irq_set_affinity = msp_cic_irq_set_affinity,
#endif
};
167
/*
 * One-time initialization of the CIC interrupt subsystem: mask both
 * VPEs' CIC interrupts, clear all pending status, configure external
 * input polarity, register the irq_chip for all 32 CIC lines, and
 * bring up the PER sub-controller.
 */
void __init msp_cic_irq_init(void)
{
	int i;
	/* Mask/clear interrupts. */
	*CIC_VPE0_MSK_REG = 0x00000000;
	*CIC_VPE1_MSK_REG = 0x00000000;
	*CIC_STS_REG = 0xFFFFFFFF;	/* clear all pending (see msp_cic_irq_ack) */
	/*
	 * The MSP7120 RG and EVBD boards use IRQ[6:4] for PCI.
	 * These inputs map to EXT_INT_POL[6:4] inside the CIC.
	 * They are to be active low, level sensitive.
	 */
	*CIC_EXT_CFG_REG &= 0xFFFF8F8F;

	/* initialize all the IRQ descriptors */
	for (i = MSP_CIC_INTBASE ; i < MSP_CIC_INTBASE + 32 ; i++) {
		irq_set_chip_and_handler(i, &msp_cic_irq_controller,
					handle_level_irq);
#ifdef CONFIG_MIPS_MT_SMTC
		/* Mask of CIC interrupt */
		irq_hwmask[i] = C_IRQ4;
#endif
	}

	/* Initialize the PER interrupt sub-system */
	msp_per_irq_init();
}
195
/* CIC masked by CIC vector processing before dispatch called */
/*
 * Top-level dispatch for CIC interrupts: read pending-and-enabled bits
 * for the current VPE and service one source, in fixed priority order:
 * VPE0 timer, VPE1 timer, the PER sub-controller, then the
 * lowest-numbered remaining source.
 */
void msp_cic_irq_dispatch(void)
{
	volatile u32 *cic_msk_reg = (volatile u32 *)CIC_VPE0_MSK_REG;
	u32 cic_mask;
	u32 pending;
	int cic_status = *CIC_STS_REG;
	cic_mask = cic_msk_reg[get_current_vpe()];
	pending = cic_status & cic_mask;	/* only enabled sources count */
	if (pending & (1 << (MSP_INT_VPE0_TIMER - MSP_CIC_INTBASE))) {
		do_IRQ(MSP_INT_VPE0_TIMER);
	} else if (pending & (1 << (MSP_INT_VPE1_TIMER - MSP_CIC_INTBASE))) {
		do_IRQ(MSP_INT_VPE1_TIMER);
	} else if (pending & (1 << (MSP_INT_PER - MSP_CIC_INTBASE))) {
		/* PER interrupts are demultiplexed by their own dispatcher */
		msp_per_irq_dispatch();
	} else if (pending) {
		/* ffs() is 1-based, hence the -1 */
		do_IRQ(ffs(pending) + MSP_CIC_INTBASE - 1);
	} else{
		spurious_interrupt();
	}
}