aboutsummaryrefslogtreecommitdiffstats
path: root/arch/mips
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2011-03-23 17:09:06 -0400
committerRalf Baechle <ralf@linux-mips.org>2011-03-25 13:45:18 -0400
commitd7881fbdf866d7d0fc3fd65805b47a837217ba2b (patch)
treef1d754e8c2aabd8d6c5b39f472a6b6d1d765068e /arch/mips
parent7ec8af9e3441478472954c43462376dd83302e00 (diff)
MIPS: msp71xx: Convert to new irq_chip functions
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
To: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/2197/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips')
-rw-r--r--arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c41
-rw-r--r--arch/mips/pmc-sierra/msp71xx/msp_irq_per.c80
-rw-r--r--arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c18
3 files changed, 46 insertions, 93 deletions
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c b/arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c
index e64458a833e2..352f29d9226f 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c
+++ b/arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c
@@ -77,7 +77,7 @@ static inline void cic_wmb(void)
77 dummy_read++; 77 dummy_read++;
78} 78}
79 79
80static inline void unmask_cic_irq(unsigned int irq) 80static void unmask_cic_irq(struct irq_data *d)
81{ 81{
82 volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG; 82 volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
83 int vpe; 83 int vpe;
@@ -89,18 +89,18 @@ static inline void unmask_cic_irq(unsigned int irq)
89 * Make sure we have IRQ affinity. It may have changed while 89 * Make sure we have IRQ affinity. It may have changed while
90 * we were processing the IRQ. 90 * we were processing the IRQ.
91 */ 91 */
92 if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity)) 92 if (!cpumask_test_cpu(smp_processor_id(), d->affinity))
93 return; 93 return;
94#endif 94#endif
95 95
96 vpe = get_current_vpe(); 96 vpe = get_current_vpe();
97 LOCK_VPE(flags, mtflags); 97 LOCK_VPE(flags, mtflags);
98 cic_msk_reg[vpe] |= (1 << (irq - MSP_CIC_INTBASE)); 98 cic_msk_reg[vpe] |= (1 << (d->irq - MSP_CIC_INTBASE));
99 UNLOCK_VPE(flags, mtflags); 99 UNLOCK_VPE(flags, mtflags);
100 cic_wmb(); 100 cic_wmb();
101} 101}
102 102
103static inline void mask_cic_irq(unsigned int irq) 103static void mask_cic_irq(struct irq_data *d)
104{ 104{
105 volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG; 105 volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
106 int vpe = get_current_vpe(); 106 int vpe = get_current_vpe();
@@ -108,33 +108,27 @@ static inline void mask_cic_irq(unsigned int irq)
108 unsigned long flags, mtflags; 108 unsigned long flags, mtflags;
109#endif 109#endif
110 LOCK_VPE(flags, mtflags); 110 LOCK_VPE(flags, mtflags);
111 cic_msk_reg[vpe] &= ~(1 << (irq - MSP_CIC_INTBASE)); 111 cic_msk_reg[vpe] &= ~(1 << (d->irq - MSP_CIC_INTBASE));
112 UNLOCK_VPE(flags, mtflags); 112 UNLOCK_VPE(flags, mtflags);
113 cic_wmb(); 113 cic_wmb();
114} 114}
115static inline void msp_cic_irq_ack(unsigned int irq) 115static void msp_cic_irq_ack(struct irq_data *d)
116{ 116{
117 mask_cic_irq(irq); 117 mask_cic_irq(d);
118 /* 118 /*
119 * Only really necessary for 18, 16-14 and sometimes 3:0 119 * Only really necessary for 18, 16-14 and sometimes 3:0
120 * (since these can be edge sensitive) but it doesn't 120 * (since these can be edge sensitive) but it doesn't
121 * hurt for the others 121 * hurt for the others
122 */ 122 */
123 *CIC_STS_REG = (1 << (irq - MSP_CIC_INTBASE)); 123 *CIC_STS_REG = (1 << (d->irq - MSP_CIC_INTBASE));
124 smtc_im_ack_irq(irq); 124 smtc_im_ack_irq(d->irq);
125}
126
127static void msp_cic_irq_end(unsigned int irq)
128{
129 if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
130 unmask_cic_irq(irq);
131} 125}
132 126
133/*Note: Limiting to VSMP . Not tested in SMTC */ 127/*Note: Limiting to VSMP . Not tested in SMTC */
134 128
135#ifdef CONFIG_MIPS_MT_SMP 129#ifdef CONFIG_MIPS_MT_SMP
136static inline int msp_cic_irq_set_affinity(unsigned int irq, 130static int msp_cic_irq_set_affinity(struct irq_data *d,
137 const struct cpumask *cpumask) 131 const struct cpumask *cpumask, bool force)
138{ 132{
139 int cpu; 133 int cpu;
140 unsigned long flags; 134 unsigned long flags;
@@ -163,13 +157,12 @@ static inline int msp_cic_irq_set_affinity(unsigned int irq,
163 157
164static struct irq_chip msp_cic_irq_controller = { 158static struct irq_chip msp_cic_irq_controller = {
165 .name = "MSP_CIC", 159 .name = "MSP_CIC",
166 .mask = mask_cic_irq, 160 .irq_mask = mask_cic_irq,
167 .mask_ack = msp_cic_irq_ack, 161 .irq_mask_ack = msp_cic_irq_ack,
168 .unmask = unmask_cic_irq, 162 .irq_unmask = unmask_cic_irq,
169 .ack = msp_cic_irq_ack, 163 .irq_ack = msp_cic_irq_ack,
170 .end = msp_cic_irq_end,
171#ifdef CONFIG_MIPS_MT_SMP 164#ifdef CONFIG_MIPS_MT_SMP
172 .set_affinity = msp_cic_irq_set_affinity, 165 .irq_set_affinity = msp_cic_irq_set_affinity,
173#endif 166#endif
174}; 167};
175 168
@@ -220,7 +213,5 @@ void msp_cic_irq_dispatch(void)
220 do_IRQ(ffs(pending) + MSP_CIC_INTBASE - 1); 213 do_IRQ(ffs(pending) + MSP_CIC_INTBASE - 1);
221 } else{ 214 } else{
222 spurious_interrupt(); 215 spurious_interrupt();
223 /* Re-enable the CIC cascaded interrupt. */
224 irq_desc[MSP_INT_CIC].chip->end(MSP_INT_CIC);
225 } 216 }
226} 217}
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c b/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c
index 72bcd70d2ddf..f9b9dcdfa9dd 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c
+++ b/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c
@@ -48,100 +48,61 @@ static inline void per_wmb(void)
48 dummy_read++; 48 dummy_read++;
49} 49}
50 50
51static inline void unmask_per_irq(unsigned int irq) 51static inline void unmask_per_irq(struct irq_data *d)
52{ 52{
53#ifdef CONFIG_SMP 53#ifdef CONFIG_SMP
54 unsigned long flags; 54 unsigned long flags;
55 spin_lock_irqsave(&per_lock, flags); 55 spin_lock_irqsave(&per_lock, flags);
56 *PER_INT_MSK_REG |= (1 << (irq - MSP_PER_INTBASE)); 56 *PER_INT_MSK_REG |= (1 << (d->irq - MSP_PER_INTBASE));
57 spin_unlock_irqrestore(&per_lock, flags); 57 spin_unlock_irqrestore(&per_lock, flags);
58#else 58#else
59 *PER_INT_MSK_REG |= (1 << (irq - MSP_PER_INTBASE)); 59 *PER_INT_MSK_REG |= (1 << (d->irq - MSP_PER_INTBASE));
60#endif 60#endif
61 per_wmb(); 61 per_wmb();
62} 62}
63 63
64static inline void mask_per_irq(unsigned int irq) 64static inline void mask_per_irq(struct irq_data *d)
65{ 65{
66#ifdef CONFIG_SMP 66#ifdef CONFIG_SMP
67 unsigned long flags; 67 unsigned long flags;
68 spin_lock_irqsave(&per_lock, flags); 68 spin_lock_irqsave(&per_lock, flags);
69 *PER_INT_MSK_REG &= ~(1 << (irq - MSP_PER_INTBASE)); 69 *PER_INT_MSK_REG &= ~(1 << (d->irq - MSP_PER_INTBASE));
70 spin_unlock_irqrestore(&per_lock, flags); 70 spin_unlock_irqrestore(&per_lock, flags);
71#else 71#else
72 *PER_INT_MSK_REG &= ~(1 << (irq - MSP_PER_INTBASE)); 72 *PER_INT_MSK_REG &= ~(1 << (d->irq - MSP_PER_INTBASE));
73#endif 73#endif
74 per_wmb(); 74 per_wmb();
75} 75}
76 76
77static inline void msp_per_irq_enable(unsigned int irq) 77static inline void msp_per_irq_ack(struct irq_data *d)
78{ 78{
79 unmask_per_irq(irq); 79 mask_per_irq(d);
80}
81
82static inline void msp_per_irq_disable(unsigned int irq)
83{
84 mask_per_irq(irq);
85}
86
87static unsigned int msp_per_irq_startup(unsigned int irq)
88{
89 msp_per_irq_enable(irq);
90 return 0;
91}
92
93#define msp_per_irq_shutdown msp_per_irq_disable
94
95static inline void msp_per_irq_ack(unsigned int irq)
96{
97 mask_per_irq(irq);
98 /* 80 /*
99 * In the PER interrupt controller, only bits 11 and 10 81 * In the PER interrupt controller, only bits 11 and 10
100 * are write-to-clear, (SPI TX complete, SPI RX complete). 82 * are write-to-clear, (SPI TX complete, SPI RX complete).
101 * It does nothing for any others. 83 * It does nothing for any others.
102 */ 84 */
103 85 *PER_INT_STS_REG = (1 << (d->irq - MSP_PER_INTBASE));
104 *PER_INT_STS_REG = (1 << (irq - MSP_PER_INTBASE));
105
106 /* Re-enable the CIC cascaded interrupt and return */
107 irq_desc[MSP_INT_CIC].chip->end(MSP_INT_CIC);
108}
109
110static void msp_per_irq_end(unsigned int irq)
111{
112 if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
113 unmask_per_irq(irq);
114} 86}
115 87
116#ifdef CONFIG_SMP 88#ifdef CONFIG_SMP
117static inline int msp_per_irq_set_affinity(unsigned int irq, 89static int msp_per_irq_set_affinity(struct irq_data *d,
118 const struct cpumask *affinity) 90 const struct cpumask *affinity, bool force)
119{ 91{
120 unsigned long flags; 92 /* WTF is this doing ????? */
121 /* 93 unmask_per_irq(d);
122 * Calls to ack, end, startup, enable are spinlocked in setup_irq and
123 * __do_IRQ.Callers of this function do not spinlock,so we need to
124 * do so ourselves.
125 */
126 raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
127 msp_per_irq_enable(irq);
128 raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
129 return 0; 94 return 0;
130
131} 95}
132#endif 96#endif
133 97
134static struct irq_chip msp_per_irq_controller = { 98static struct irq_chip msp_per_irq_controller = {
135 .name = "MSP_PER", 99 .name = "MSP_PER",
136	.startup = msp_per_irq_startup,	100	.irq_enable = unmask_per_irq,
137 .shutdown = msp_per_irq_shutdown, 101 .irq_disable = mask_per_irq,
138 .enable = msp_per_irq_enable, 102 .irq_ack = msp_per_irq_ack,
139 .disable = msp_per_irq_disable,
140#ifdef CONFIG_SMP 103#ifdef CONFIG_SMP
141 .set_affinity = msp_per_irq_set_affinity, 104 .irq_set_affinity = msp_per_irq_set_affinity,
142#endif 105#endif
143 .ack = msp_per_irq_ack,
144 .end = msp_per_irq_end,
145}; 106};
146 107
147void __init msp_per_irq_init(void) 108void __init msp_per_irq_init(void)
@@ -152,10 +113,7 @@ void __init msp_per_irq_init(void)
152 *PER_INT_STS_REG = 0xFFFFFFFF; 113 *PER_INT_STS_REG = 0xFFFFFFFF;
153 /* initialize all the IRQ descriptors */ 114 /* initialize all the IRQ descriptors */
154 for (i = MSP_PER_INTBASE; i < MSP_PER_INTBASE + 32; i++) { 115 for (i = MSP_PER_INTBASE; i < MSP_PER_INTBASE + 32; i++) {
155 irq_desc[i].status = IRQ_DISABLED; 116 irq_set_chip(i, &msp_per_irq_controller);
156 irq_desc[i].action = NULL;
157 irq_desc[i].depth = 1;
158 irq_desc[i].chip = &msp_per_irq_controller;
159#ifdef CONFIG_MIPS_MT_SMTC 117#ifdef CONFIG_MIPS_MT_SMTC
160 irq_hwmask[i] = C_IRQ4; 118 irq_hwmask[i] = C_IRQ4;
161#endif 119#endif
@@ -173,7 +131,5 @@ void msp_per_irq_dispatch(void)
173 do_IRQ(ffs(pending) + MSP_PER_INTBASE - 1); 131 do_IRQ(ffs(pending) + MSP_PER_INTBASE - 1);
174 } else { 132 } else {
175 spurious_interrupt(); 133 spurious_interrupt();
176 /* Re-enable the CIC cascaded interrupt and return */
177 irq_desc[MSP_INT_CIC].chip->end(MSP_INT_CIC);
178 } 134 }
179} 135}
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c b/arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c
index 61f390232346..8f51e4adc438 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c
+++ b/arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c
@@ -21,8 +21,10 @@
21#include <msp_slp_int.h> 21#include <msp_slp_int.h>
22#include <msp_regs.h> 22#include <msp_regs.h>
23 23
24static inline void unmask_msp_slp_irq(unsigned int irq) 24static inline void unmask_msp_slp_irq(struct irq_data *d)
25{ 25{
26 unsigned int irq = d->irq;
27
26 /* check for PER interrupt range */ 28 /* check for PER interrupt range */
27 if (irq < MSP_PER_INTBASE) 29 if (irq < MSP_PER_INTBASE)
28 *SLP_INT_MSK_REG |= (1 << (irq - MSP_SLP_INTBASE)); 30 *SLP_INT_MSK_REG |= (1 << (irq - MSP_SLP_INTBASE));
@@ -30,8 +32,10 @@ static inline void unmask_msp_slp_irq(unsigned int irq)
30 *PER_INT_MSK_REG |= (1 << (irq - MSP_PER_INTBASE)); 32 *PER_INT_MSK_REG |= (1 << (irq - MSP_PER_INTBASE));
31} 33}
32 34
33static inline void mask_msp_slp_irq(unsigned int irq) 35static inline void mask_msp_slp_irq(struct irq_data *d)
34{ 36{
37 unsigned int irq = d->irq;
38
35 /* check for PER interrupt range */ 39 /* check for PER interrupt range */
36 if (irq < MSP_PER_INTBASE) 40 if (irq < MSP_PER_INTBASE)
37 *SLP_INT_MSK_REG &= ~(1 << (irq - MSP_SLP_INTBASE)); 41 *SLP_INT_MSK_REG &= ~(1 << (irq - MSP_SLP_INTBASE));
@@ -43,8 +47,10 @@ static inline void mask_msp_slp_irq(unsigned int irq)
43 * While we ack the interrupt interrupts are disabled and thus we don't need 47 * While we ack the interrupt interrupts are disabled and thus we don't need
44 * to deal with concurrency issues. Same for msp_slp_irq_end. 48 * to deal with concurrency issues. Same for msp_slp_irq_end.
45 */ 49 */
46static inline void ack_msp_slp_irq(unsigned int irq) 50static inline void ack_msp_slp_irq(struct irq_data *d)
47{ 51{
52 unsigned int irq = d->irq;
53
48 /* check for PER interrupt range */ 54 /* check for PER interrupt range */
49 if (irq < MSP_PER_INTBASE) 55 if (irq < MSP_PER_INTBASE)
50 *SLP_INT_STS_REG = (1 << (irq - MSP_SLP_INTBASE)); 56 *SLP_INT_STS_REG = (1 << (irq - MSP_SLP_INTBASE));
@@ -54,9 +60,9 @@ static inline void ack_msp_slp_irq(unsigned int irq)
54 60
55static struct irq_chip msp_slp_irq_controller = { 61static struct irq_chip msp_slp_irq_controller = {
56 .name = "MSP_SLP", 62 .name = "MSP_SLP",
57 .ack = ack_msp_slp_irq, 63 .irq_ack = ack_msp_slp_irq,
58 .mask = mask_msp_slp_irq, 64 .irq_mask = mask_msp_slp_irq,
59 .unmask = unmask_msp_slp_irq, 65 .irq_unmask = unmask_msp_slp_irq,
60}; 66};
61 67
62void __init msp_slp_irq_init(void) 68void __init msp_slp_irq_init(void)