Diffstat (limited to 'arch/mips/netlogic/common/irq.c')
-rw-r--r--  arch/mips/netlogic/common/irq.c  165
1 file changed, 105 insertions(+), 60 deletions(-)
diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c
index e52bfcbce09..00dcc7a2bc5 100644
--- a/arch/mips/netlogic/common/irq.c
+++ b/arch/mips/netlogic/common/irq.c
@@ -36,7 +36,6 @@
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <linux/interrupt.h>
-#include <linux/spinlock.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/irq.h>
@@ -59,68 +58,70 @@
 #elif defined(CONFIG_CPU_XLR)
 #include <asm/netlogic/xlr/iomap.h>
 #include <asm/netlogic/xlr/pic.h>
+#include <asm/netlogic/xlr/fmn.h>
 #else
 #error "Unknown CPU"
 #endif
-/*
- * These are the routines that handle all the low level interrupt stuff.
- * Actions handled here are: initialization of the interrupt map, requesting of
- * interrupt lines by handlers, dispatching if interrupts to handlers, probing
- * for interrupt lines
- */
 
-/* Globals */
-static uint64_t nlm_irq_mask;
-static DEFINE_SPINLOCK(nlm_pic_lock);
+#ifdef CONFIG_SMP
+#define SMP_IRQ_MASK	((1ULL << IRQ_IPI_SMP_FUNCTION) | \
+			 (1ULL << IRQ_IPI_SMP_RESCHEDULE))
+#else
+#define SMP_IRQ_MASK	0
+#endif
+#define PERCPU_IRQ_MASK	(SMP_IRQ_MASK | (1ull << IRQ_TIMER) | \
+				(1ull << IRQ_FMN))
+
+struct nlm_pic_irq {
+	void	(*extra_ack)(struct irq_data *);
+	struct	nlm_soc_info *node;
+	int	picirq;
+	int	irt;
+	int	flags;
+};
 
 static void xlp_pic_enable(struct irq_data *d)
 {
 	unsigned long flags;
-	int irt;
+	struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
 
-	irt = nlm_irq_to_irt(d->irq);
-	if (irt == -1)
-		return;
-	spin_lock_irqsave(&nlm_pic_lock, flags);
-	nlm_pic_enable_irt(nlm_pic_base, irt);
-	spin_unlock_irqrestore(&nlm_pic_lock, flags);
+	BUG_ON(!pd);
+	spin_lock_irqsave(&pd->node->piclock, flags);
+	nlm_pic_enable_irt(pd->node->picbase, pd->irt);
+	spin_unlock_irqrestore(&pd->node->piclock, flags);
 }
 
 static void xlp_pic_disable(struct irq_data *d)
 {
+	struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
 	unsigned long flags;
-	int irt;
 
-	irt = nlm_irq_to_irt(d->irq);
-	if (irt == -1)
-		return;
-	spin_lock_irqsave(&nlm_pic_lock, flags);
-	nlm_pic_disable_irt(nlm_pic_base, irt);
-	spin_unlock_irqrestore(&nlm_pic_lock, flags);
+	BUG_ON(!pd);
+	spin_lock_irqsave(&pd->node->piclock, flags);
+	nlm_pic_disable_irt(pd->node->picbase, pd->irt);
+	spin_unlock_irqrestore(&pd->node->piclock, flags);
 }
 
 static void xlp_pic_mask_ack(struct irq_data *d)
 {
-	uint64_t mask = 1ull << d->irq;
+	struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
+	uint64_t mask = 1ull << pd->picirq;
 
 	write_c0_eirr(mask);	/* ack by writing EIRR */
 }
 
 static void xlp_pic_unmask(struct irq_data *d)
 {
-	void *hd = irq_data_get_irq_handler_data(d);
-	int irt;
+	struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
 
-	irt = nlm_irq_to_irt(d->irq);
-	if (irt == -1)
+	if (!pd)
 		return;
 
-	if (hd) {
-		void (*extra_ack)(void *) = hd;
-		extra_ack(d);
-	}
+	if (pd->extra_ack)
+		pd->extra_ack(d);
+
 	/* Ack is a single write, no need to lock */
-	nlm_pic_ack(nlm_pic_base, irt);
+	nlm_pic_ack(pd->node->picbase, pd->irt);
 }
 
 static struct irq_chip xlp_pic = {
@@ -174,64 +175,108 @@ struct irq_chip nlm_cpu_intr = {
 	.irq_eoi	= cpuintr_ack,
 };
 
-void __init init_nlm_common_irqs(void)
+static void __init nlm_init_percpu_irqs(void)
 {
-	int i, irq, irt;
+	int i;
 
 	for (i = 0; i < PIC_IRT_FIRST_IRQ; i++)
 		irq_set_chip_and_handler(i, &nlm_cpu_intr, handle_percpu_irq);
-
-	for (i = PIC_IRT_FIRST_IRQ; i <= PIC_IRT_LAST_IRQ ; i++)
-		irq_set_chip_and_handler(i, &xlp_pic, handle_level_irq);
-
 #ifdef CONFIG_SMP
 	irq_set_chip_and_handler(IRQ_IPI_SMP_FUNCTION, &nlm_cpu_intr,
 			 nlm_smp_function_ipi_handler);
 	irq_set_chip_and_handler(IRQ_IPI_SMP_RESCHEDULE, &nlm_cpu_intr,
 			 nlm_smp_resched_ipi_handler);
-	nlm_irq_mask |=
-	    ((1ULL << IRQ_IPI_SMP_FUNCTION) | (1ULL << IRQ_IPI_SMP_RESCHEDULE));
 #endif
+}
+
+void nlm_setup_pic_irq(int node, int picirq, int irq, int irt)
+{
+	struct nlm_pic_irq *pic_data;
+	int xirq;
+
+	xirq = nlm_irq_to_xirq(node, irq);
+	pic_data = kzalloc(sizeof(*pic_data), GFP_KERNEL);
+	BUG_ON(pic_data == NULL);
+	pic_data->irt = irt;
+	pic_data->picirq = picirq;
+	pic_data->node = nlm_get_node(node);
+	irq_set_chip_and_handler(xirq, &xlp_pic, handle_level_irq);
+	irq_set_handler_data(xirq, pic_data);
+}
+
+void nlm_set_pic_extra_ack(int node, int irq, void (*xack)(struct irq_data *))
+{
+	struct nlm_pic_irq *pic_data;
+	int xirq;
+
+	xirq = nlm_irq_to_xirq(node, irq);
+	pic_data = irq_get_handler_data(xirq);
+	pic_data->extra_ack = xack;
+}
 
-	for (irq = PIC_IRT_FIRST_IRQ; irq <= PIC_IRT_LAST_IRQ; irq++) {
-		irt = nlm_irq_to_irt(irq);
+static void nlm_init_node_irqs(int node)
+{
+	int i, irt;
+	uint64_t irqmask;
+	struct nlm_soc_info *nodep;
+
+	pr_info("Init IRQ for node %d\n", node);
+	nodep = nlm_get_node(node);
+	irqmask = PERCPU_IRQ_MASK;
+	for (i = PIC_IRT_FIRST_IRQ; i <= PIC_IRT_LAST_IRQ; i++) {
+		irt = nlm_irq_to_irt(i);
 		if (irt == -1)
 			continue;
-		nlm_irq_mask |= (1ULL << irq);
-		nlm_pic_init_irt(nlm_pic_base, irt, irq, 0);
+		nlm_setup_pic_irq(node, i, i, irt);
+		/* set interrupts to first cpu in node */
+		nlm_pic_init_irt(nodep->picbase, irt, i,
+					node * NLM_CPUS_PER_NODE);
+		irqmask |= (1ull << i);
 	}
-
-	nlm_irq_mask |= (1ULL << IRQ_TIMER);
+	nodep->irqmask = irqmask;
 }
 
 void __init arch_init_irq(void)
 {
 	/* Initialize the irq descriptors */
-	init_nlm_common_irqs();
-
-	write_c0_eimr(nlm_irq_mask);
+	nlm_init_percpu_irqs();
+	nlm_init_node_irqs(0);
+	write_c0_eimr(nlm_current_node()->irqmask);
+#if defined(CONFIG_CPU_XLR)
+	nlm_setup_fmn_irq();
+#endif
 }
 
-void __cpuinit nlm_smp_irq_init(void)
+void nlm_smp_irq_init(int hwcpuid)
 {
-	/* set interrupt mask for non-zero cpus */
-	write_c0_eimr(nlm_irq_mask);
+	int node, cpu;
+
+	node = hwcpuid / NLM_CPUS_PER_NODE;
+	cpu = hwcpuid % NLM_CPUS_PER_NODE;
+
+	if (cpu == 0 && node != 0)
+		nlm_init_node_irqs(node);
+	write_c0_eimr(nlm_current_node()->irqmask);
 }
 
 asmlinkage void plat_irq_dispatch(void)
 {
 	uint64_t eirr;
-	int i;
+	int i, node;
 
+	node = nlm_nodeid();
 	eirr = read_c0_eirr() & read_c0_eimr();
-	if (eirr & (1 << IRQ_TIMER)) {
-		do_IRQ(IRQ_TIMER);
-		return;
-	}
 
 	i = __ilog2_u64(eirr);
 	if (i == -1)
 		return;
 
-	do_IRQ(i);
+	/* per-CPU IRQs don't need translation */
+	if (eirr & PERCPU_IRQ_MASK) {
+		do_IRQ(i);
+		return;
+	}
+
+	/* top level irq handling */
+	do_IRQ(nlm_irq_to_xirq(node, i));
 }
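The new nlm_setup_pic_irq()/nlm_set_pic_extra_ack() helpers replace the old global nlm_irq_mask/nlm_pic_lock state with per-node data and an optional per-irq ack callback. A minimal sketch of how per-SoC setup code might use them, assuming the same headers as irq.c; IRQ_UART0, uart_extra_ack() and example_board_irq_setup() are illustrative placeholders, not part of this patch:

/*
 * Illustrative only: wire up one PIC-routed interrupt on node 0 using
 * the per-node helpers above, then attach an extra ack callback.
 * IRQ_UART0 and both functions below are hypothetical placeholders.
 */
static void uart_extra_ack(struct irq_data *d)
{
	/* device-specific acknowledge would go here */
}

static void __init example_board_irq_setup(void)
{
	int node = 0;
	int irq = IRQ_UART0;			/* placeholder PIC irq number */
	int irt = nlm_irq_to_irt(irq);		/* PIC IRT entry for this irq */

	if (irt == -1)
		return;

	/* allocate per-irq data and install the xlp_pic chip/handler */
	nlm_setup_pic_irq(node, irq, irq, irt);

	/* invoked from xlp_pic_unmask() just before the PIC ack */
	nlm_set_pic_extra_ack(node, irq, uart_extra_ack);
}

The extra_ack callback runs in xlp_pic_unmask() before nlm_pic_ack(), so device-specific acknowledge sequences slot in there without the caller touching the PIC registers directly.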