diff options
author | David Daney <ddaney@caviumnetworks.com> | 2010-07-23 13:43:46 -0400 |
---|---|---|
committer | Ralf Baechle <ralf@linux-mips.org> | 2010-08-05 08:26:10 -0400 |
commit | 5aae1fd4d41ea69da845e11d4766ab61666494ed (patch) | |
tree | 54b95ad6e64a13d6848b0dbb61db89dd88bf0fa6 /arch/mips/cavium-octeon/octeon-irq.c | |
parent | a894f14d7ebe9e278b496b1e653ae57f2eff514e (diff) |
MIPS: Octeon: Improve interrupt handling.
The main change is to change most of the IRQs from handle_percpu_irq
to handle_fasteoi_irq. This necessitates extracting all the .ack code
to common functions that are not exposed to the irq core.
The affinity code now acts more sanely, by doing round-robin
distribution instead of broadcasting.
Because of the change to handle_fasteoi_irq and affinity, some of the
IRQs had to be split into separate groups with their own struct
irq_chip to prevent undefined operations on specific IRQ lines.
Signed-off-by: David Daney <ddaney@caviumnetworks.com>
To: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/1485/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/cavium-octeon/octeon-irq.c')
-rw-r--r-- | arch/mips/cavium-octeon/octeon-irq.c | 356 |
1 files changed, 256 insertions, 100 deletions
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index f4b901aaf509..8fb9fb667779 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 2004-2008 Cavium Networks | 6 | * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks |
7 | */ | 7 | */ |
8 | #include <linux/irq.h> | 8 | #include <linux/irq.h> |
9 | #include <linux/interrupt.h> | 9 | #include <linux/interrupt.h> |
@@ -39,14 +39,14 @@ static void octeon_irq_core_ack(unsigned int irq) | |||
39 | 39 | ||
40 | static void octeon_irq_core_eoi(unsigned int irq) | 40 | static void octeon_irq_core_eoi(unsigned int irq) |
41 | { | 41 | { |
42 | struct irq_desc *desc = irq_desc + irq; | 42 | struct irq_desc *desc = irq_to_desc(irq); |
43 | unsigned int bit = irq - OCTEON_IRQ_SW0; | 43 | unsigned int bit = irq - OCTEON_IRQ_SW0; |
44 | /* | 44 | /* |
45 | * If an IRQ is being processed while we are disabling it the | 45 | * If an IRQ is being processed while we are disabling it the |
46 | * handler will attempt to unmask the interrupt after it has | 46 | * handler will attempt to unmask the interrupt after it has |
47 | * been disabled. | 47 | * been disabled. |
48 | */ | 48 | */ |
49 | if (desc->status & IRQ_DISABLED) | 49 | if ((unlikely(desc->status & IRQ_DISABLED))) |
50 | return; | 50 | return; |
51 | /* | 51 | /* |
52 | * We don't need to disable IRQs to make these atomic since | 52 | * We don't need to disable IRQs to make these atomic since |
@@ -104,6 +104,29 @@ static struct irq_chip octeon_irq_chip_core = { | |||
104 | 104 | ||
105 | static void octeon_irq_ciu0_ack(unsigned int irq) | 105 | static void octeon_irq_ciu0_ack(unsigned int irq) |
106 | { | 106 | { |
107 | switch (irq) { | ||
108 | case OCTEON_IRQ_GMX_DRP0: | ||
109 | case OCTEON_IRQ_GMX_DRP1: | ||
110 | case OCTEON_IRQ_IPD_DRP: | ||
111 | case OCTEON_IRQ_KEY_ZERO: | ||
112 | case OCTEON_IRQ_TIMER0: | ||
113 | case OCTEON_IRQ_TIMER1: | ||
114 | case OCTEON_IRQ_TIMER2: | ||
115 | case OCTEON_IRQ_TIMER3: | ||
116 | { | ||
117 | int index = cvmx_get_core_num() * 2; | ||
118 | u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); | ||
119 | /* | ||
120 | * CIU timer type interrupts must be acknoleged by | ||
121 | * writing a '1' bit to their sum0 bit. | ||
122 | */ | ||
123 | cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask); | ||
124 | break; | ||
125 | } | ||
126 | default: | ||
127 | break; | ||
128 | } | ||
129 | |||
107 | /* | 130 | /* |
108 | * In order to avoid any locking accessing the CIU, we | 131 | * In order to avoid any locking accessing the CIU, we |
109 | * acknowledge CIU interrupts by disabling all of them. This | 132 | * acknowledge CIU interrupts by disabling all of them. This |
@@ -128,8 +151,54 @@ static void octeon_irq_ciu0_eoi(unsigned int irq) | |||
128 | set_c0_status(0x100 << 2); | 151 | set_c0_status(0x100 << 2); |
129 | } | 152 | } |
130 | 153 | ||
154 | static int next_coreid_for_irq(struct irq_desc *desc) | ||
155 | { | ||
156 | |||
157 | #ifdef CONFIG_SMP | ||
158 | int coreid; | ||
159 | int weight = cpumask_weight(desc->affinity); | ||
160 | |||
161 | if (weight > 1) { | ||
162 | int cpu = smp_processor_id(); | ||
163 | for (;;) { | ||
164 | cpu = cpumask_next(cpu, desc->affinity); | ||
165 | if (cpu >= nr_cpu_ids) { | ||
166 | cpu = -1; | ||
167 | continue; | ||
168 | } else if (cpumask_test_cpu(cpu, cpu_online_mask)) { | ||
169 | break; | ||
170 | } | ||
171 | } | ||
172 | coreid = octeon_coreid_for_cpu(cpu); | ||
173 | } else if (weight == 1) { | ||
174 | coreid = octeon_coreid_for_cpu(cpumask_first(desc->affinity)); | ||
175 | } else { | ||
176 | coreid = cvmx_get_core_num(); | ||
177 | } | ||
178 | return coreid; | ||
179 | #else | ||
180 | return cvmx_get_core_num(); | ||
181 | #endif | ||
182 | } | ||
183 | |||
131 | static void octeon_irq_ciu0_enable(unsigned int irq) | 184 | static void octeon_irq_ciu0_enable(unsigned int irq) |
132 | { | 185 | { |
186 | struct irq_desc *desc = irq_to_desc(irq); | ||
187 | int coreid = next_coreid_for_irq(desc); | ||
188 | unsigned long flags; | ||
189 | uint64_t en0; | ||
190 | int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */ | ||
191 | |||
192 | raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); | ||
193 | en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); | ||
194 | en0 |= 1ull << bit; | ||
195 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0); | ||
196 | cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); | ||
197 | raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); | ||
198 | } | ||
199 | |||
200 | static void octeon_irq_ciu0_enable_mbox(unsigned int irq) | ||
201 | { | ||
133 | int coreid = cvmx_get_core_num(); | 202 | int coreid = cvmx_get_core_num(); |
134 | unsigned long flags; | 203 | unsigned long flags; |
135 | uint64_t en0; | 204 | uint64_t en0; |
@@ -165,63 +234,76 @@ static void octeon_irq_ciu0_disable(unsigned int irq) | |||
165 | } | 234 | } |
166 | 235 | ||
167 | /* | 236 | /* |
168 | * Enable the irq on the current core for chips that have the EN*_W1{S,C} | 237 | * Enable the irq on the next core in the affinity set for chips that |
169 | * registers. | 238 | * have the EN*_W1{S,C} registers. |
170 | */ | 239 | */ |
171 | static void octeon_irq_ciu0_enable_v2(unsigned int irq) | 240 | static void octeon_irq_ciu0_enable_v2(unsigned int irq) |
172 | { | 241 | { |
173 | int index = cvmx_get_core_num() * 2; | 242 | int index; |
174 | u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); | 243 | u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); |
244 | struct irq_desc *desc = irq_to_desc(irq); | ||
175 | 245 | ||
176 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | 246 | if ((desc->status & IRQ_DISABLED) == 0) { |
247 | index = next_coreid_for_irq(desc) * 2; | ||
248 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | ||
249 | } | ||
177 | } | 250 | } |
178 | 251 | ||
179 | /* | 252 | /* |
180 | * Disable the irq on the current core for chips that have the EN*_W1{S,C} | 253 | * Enable the irq on the current CPU for chips that |
181 | * registers. | 254 | * have the EN*_W1{S,C} registers. |
182 | */ | 255 | */ |
183 | static void octeon_irq_ciu0_ack_v2(unsigned int irq) | 256 | static void octeon_irq_ciu0_enable_mbox_v2(unsigned int irq) |
184 | { | 257 | { |
185 | int index = cvmx_get_core_num() * 2; | 258 | int index; |
186 | u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); | 259 | u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); |
187 | 260 | ||
188 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); | 261 | index = cvmx_get_core_num() * 2; |
262 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | ||
189 | } | 263 | } |
190 | 264 | ||
191 | /* | 265 | /* |
192 | * CIU timer type interrupts must be acknowledged by writing a '1' bit | 266 | * Disable the irq on the current core for chips that have the EN*_W1{S,C} |
193 | * to their sum0 bit. | 267 | * registers. |
194 | */ | 268 | */ |
195 | static void octeon_irq_ciu0_timer_ack(unsigned int irq) | 269 | static void octeon_irq_ciu0_ack_v2(unsigned int irq) |
196 | { | 270 | { |
197 | int index = cvmx_get_core_num() * 2; | 271 | int index = cvmx_get_core_num() * 2; |
198 | uint64_t mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); | 272 | u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); |
199 | cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask); | ||
200 | } | ||
201 | 273 | ||
202 | static void octeon_irq_ciu0_timer_ack_v1(unsigned int irq) | 274 | switch (irq) { |
203 | { | 275 | case OCTEON_IRQ_GMX_DRP0: |
204 | octeon_irq_ciu0_timer_ack(irq); | 276 | case OCTEON_IRQ_GMX_DRP1: |
205 | octeon_irq_ciu0_ack(irq); | 277 | case OCTEON_IRQ_IPD_DRP: |
206 | } | 278 | case OCTEON_IRQ_KEY_ZERO: |
279 | case OCTEON_IRQ_TIMER0: | ||
280 | case OCTEON_IRQ_TIMER1: | ||
281 | case OCTEON_IRQ_TIMER2: | ||
282 | case OCTEON_IRQ_TIMER3: | ||
283 | /* | ||
284 | * CIU timer type interrupts must be acknowledged by | ||
285 | * writing a '1' bit to their sum0 bit. | ||
286 | */ | ||
287 | cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask); | ||
288 | break; | ||
289 | default: | ||
290 | break; | ||
291 | } | ||
207 | 292 | ||
208 | static void octeon_irq_ciu0_timer_ack_v2(unsigned int irq) | 293 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); |
209 | { | ||
210 | octeon_irq_ciu0_timer_ack(irq); | ||
211 | octeon_irq_ciu0_ack_v2(irq); | ||
212 | } | 294 | } |
213 | 295 | ||
214 | /* | 296 | /* |
215 | * Enable the irq on the current core for chips that have the EN*_W1{S,C} | 297 | * Enable the irq on the current core for chips that have the EN*_W1{S,C} |
216 | * registers. | 298 | * registers. |
217 | */ | 299 | */ |
218 | static void octeon_irq_ciu0_eoi_v2(unsigned int irq) | 300 | static void octeon_irq_ciu0_eoi_mbox_v2(unsigned int irq) |
219 | { | 301 | { |
220 | struct irq_desc *desc = irq_desc + irq; | 302 | struct irq_desc *desc = irq_to_desc(irq); |
221 | int index = cvmx_get_core_num() * 2; | 303 | int index = cvmx_get_core_num() * 2; |
222 | u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); | 304 | u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); |
223 | 305 | ||
224 | if ((desc->status & IRQ_DISABLED) == 0) | 306 | if (likely((desc->status & IRQ_DISABLED) == 0)) |
225 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | 307 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); |
226 | } | 308 | } |
227 | 309 | ||
@@ -244,18 +326,30 @@ static void octeon_irq_ciu0_disable_all_v2(unsigned int irq) | |||
244 | static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest) | 326 | static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest) |
245 | { | 327 | { |
246 | int cpu; | 328 | int cpu; |
329 | struct irq_desc *desc = irq_to_desc(irq); | ||
330 | int enable_one = (desc->status & IRQ_DISABLED) == 0; | ||
247 | unsigned long flags; | 331 | unsigned long flags; |
248 | int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */ | 332 | int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */ |
249 | 333 | ||
334 | /* | ||
335 | * For non-v2 CIU, we will allow only single CPU affinity. | ||
336 | * This removes the need to do locking in the .ack/.eoi | ||
337 | * functions. | ||
338 | */ | ||
339 | if (cpumask_weight(dest) != 1) | ||
340 | return -EINVAL; | ||
341 | |||
250 | raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); | 342 | raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); |
251 | for_each_online_cpu(cpu) { | 343 | for_each_online_cpu(cpu) { |
252 | int coreid = octeon_coreid_for_cpu(cpu); | 344 | int coreid = octeon_coreid_for_cpu(cpu); |
253 | uint64_t en0 = | 345 | uint64_t en0 = |
254 | cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); | 346 | cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); |
255 | if (cpumask_test_cpu(cpu, dest)) | 347 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
348 | enable_one = 0; | ||
256 | en0 |= 1ull << bit; | 349 | en0 |= 1ull << bit; |
257 | else | 350 | } else { |
258 | en0 &= ~(1ull << bit); | 351 | en0 &= ~(1ull << bit); |
352 | } | ||
259 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0); | 353 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0); |
260 | } | 354 | } |
261 | /* | 355 | /* |
@@ -277,13 +371,18 @@ static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq, | |||
277 | { | 371 | { |
278 | int cpu; | 372 | int cpu; |
279 | int index; | 373 | int index; |
374 | struct irq_desc *desc = irq_to_desc(irq); | ||
375 | int enable_one = (desc->status & IRQ_DISABLED) == 0; | ||
280 | u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); | 376 | u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0); |
377 | |||
281 | for_each_online_cpu(cpu) { | 378 | for_each_online_cpu(cpu) { |
282 | index = octeon_coreid_for_cpu(cpu) * 2; | 379 | index = octeon_coreid_for_cpu(cpu) * 2; |
283 | if (cpumask_test_cpu(cpu, dest)) | 380 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
381 | enable_one = 0; | ||
284 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | 382 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); |
285 | else | 383 | } else { |
286 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); | 384 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); |
385 | } | ||
287 | } | 386 | } |
288 | return 0; | 387 | return 0; |
289 | } | 388 | } |
@@ -296,8 +395,7 @@ static struct irq_chip octeon_irq_chip_ciu0_v2 = { | |||
296 | .name = "CIU0", | 395 | .name = "CIU0", |
297 | .enable = octeon_irq_ciu0_enable_v2, | 396 | .enable = octeon_irq_ciu0_enable_v2, |
298 | .disable = octeon_irq_ciu0_disable_all_v2, | 397 | .disable = octeon_irq_ciu0_disable_all_v2, |
299 | .ack = octeon_irq_ciu0_ack_v2, | 398 | .eoi = octeon_irq_ciu0_enable_v2, |
300 | .eoi = octeon_irq_ciu0_eoi_v2, | ||
301 | #ifdef CONFIG_SMP | 399 | #ifdef CONFIG_SMP |
302 | .set_affinity = octeon_irq_ciu0_set_affinity_v2, | 400 | .set_affinity = octeon_irq_ciu0_set_affinity_v2, |
303 | #endif | 401 | #endif |
@@ -307,36 +405,27 @@ static struct irq_chip octeon_irq_chip_ciu0 = { | |||
307 | .name = "CIU0", | 405 | .name = "CIU0", |
308 | .enable = octeon_irq_ciu0_enable, | 406 | .enable = octeon_irq_ciu0_enable, |
309 | .disable = octeon_irq_ciu0_disable, | 407 | .disable = octeon_irq_ciu0_disable, |
310 | .ack = octeon_irq_ciu0_ack, | ||
311 | .eoi = octeon_irq_ciu0_eoi, | 408 | .eoi = octeon_irq_ciu0_eoi, |
312 | #ifdef CONFIG_SMP | 409 | #ifdef CONFIG_SMP |
313 | .set_affinity = octeon_irq_ciu0_set_affinity, | 410 | .set_affinity = octeon_irq_ciu0_set_affinity, |
314 | #endif | 411 | #endif |
315 | }; | 412 | }; |
316 | 413 | ||
317 | static struct irq_chip octeon_irq_chip_ciu0_timer_v2 = { | 414 | /* The mbox versions don't do any affinity or round-robin. */ |
318 | .name = "CIU0-T", | 415 | static struct irq_chip octeon_irq_chip_ciu0_mbox_v2 = { |
319 | .enable = octeon_irq_ciu0_enable_v2, | 416 | .name = "CIU0-M", |
320 | .disable = octeon_irq_ciu0_disable_all_v2, | 417 | .enable = octeon_irq_ciu0_enable_mbox_v2, |
321 | .ack = octeon_irq_ciu0_timer_ack_v2, | 418 | .disable = octeon_irq_ciu0_disable, |
322 | .eoi = octeon_irq_ciu0_eoi_v2, | 419 | .eoi = octeon_irq_ciu0_eoi_mbox_v2, |
323 | #ifdef CONFIG_SMP | ||
324 | .set_affinity = octeon_irq_ciu0_set_affinity_v2, | ||
325 | #endif | ||
326 | }; | 420 | }; |
327 | 421 | ||
328 | static struct irq_chip octeon_irq_chip_ciu0_timer = { | 422 | static struct irq_chip octeon_irq_chip_ciu0_mbox = { |
329 | .name = "CIU0-T", | 423 | .name = "CIU0-M", |
330 | .enable = octeon_irq_ciu0_enable, | 424 | .enable = octeon_irq_ciu0_enable_mbox, |
331 | .disable = octeon_irq_ciu0_disable, | 425 | .disable = octeon_irq_ciu0_disable, |
332 | .ack = octeon_irq_ciu0_timer_ack_v1, | ||
333 | .eoi = octeon_irq_ciu0_eoi, | 426 | .eoi = octeon_irq_ciu0_eoi, |
334 | #ifdef CONFIG_SMP | ||
335 | .set_affinity = octeon_irq_ciu0_set_affinity, | ||
336 | #endif | ||
337 | }; | 427 | }; |
338 | 428 | ||
339 | |||
340 | static void octeon_irq_ciu1_ack(unsigned int irq) | 429 | static void octeon_irq_ciu1_ack(unsigned int irq) |
341 | { | 430 | { |
342 | /* | 431 | /* |
@@ -363,7 +452,8 @@ static void octeon_irq_ciu1_eoi(unsigned int irq) | |||
363 | 452 | ||
364 | static void octeon_irq_ciu1_enable(unsigned int irq) | 453 | static void octeon_irq_ciu1_enable(unsigned int irq) |
365 | { | 454 | { |
366 | int coreid = cvmx_get_core_num(); | 455 | struct irq_desc *desc = irq_to_desc(irq); |
456 | int coreid = next_coreid_for_irq(desc); | ||
367 | unsigned long flags; | 457 | unsigned long flags; |
368 | uint64_t en1; | 458 | uint64_t en1; |
369 | int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ | 459 | int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ |
@@ -376,6 +466,25 @@ static void octeon_irq_ciu1_enable(unsigned int irq) | |||
376 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); | 466 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); |
377 | } | 467 | } |
378 | 468 | ||
469 | /* | ||
470 | * Watchdog interrupts are special. They are associated with a single | ||
471 | * core, so we hardwire the affinity to that core. | ||
472 | */ | ||
473 | static void octeon_irq_ciu1_wd_enable(unsigned int irq) | ||
474 | { | ||
475 | unsigned long flags; | ||
476 | uint64_t en1; | ||
477 | int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ | ||
478 | int coreid = bit; | ||
479 | |||
480 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); | ||
481 | en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); | ||
482 | en1 |= 1ull << bit; | ||
483 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1); | ||
484 | cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); | ||
485 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); | ||
486 | } | ||
487 | |||
379 | static void octeon_irq_ciu1_disable(unsigned int irq) | 488 | static void octeon_irq_ciu1_disable(unsigned int irq) |
380 | { | 489 | { |
381 | int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ | 490 | int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ |
@@ -403,36 +512,43 @@ static void octeon_irq_ciu1_disable(unsigned int irq) | |||
403 | */ | 512 | */ |
404 | static void octeon_irq_ciu1_enable_v2(unsigned int irq) | 513 | static void octeon_irq_ciu1_enable_v2(unsigned int irq) |
405 | { | 514 | { |
406 | int index = cvmx_get_core_num() * 2 + 1; | 515 | int index; |
407 | u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); | 516 | u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); |
517 | struct irq_desc *desc = irq_to_desc(irq); | ||
408 | 518 | ||
409 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | 519 | if ((desc->status & IRQ_DISABLED) == 0) { |
520 | index = next_coreid_for_irq(desc) * 2 + 1; | ||
521 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | ||
522 | } | ||
410 | } | 523 | } |
411 | 524 | ||
412 | /* | 525 | /* |
413 | * Disable the irq on the current core for chips that have the EN*_W1{S,C} | 526 | * Watchdog interrupts are special. They are associated with a single |
414 | * registers. | 527 | * core, so we hardwire the affinity to that core. |
415 | */ | 528 | */ |
416 | static void octeon_irq_ciu1_ack_v2(unsigned int irq) | 529 | static void octeon_irq_ciu1_wd_enable_v2(unsigned int irq) |
417 | { | 530 | { |
418 | int index = cvmx_get_core_num() * 2 + 1; | 531 | int index; |
532 | int coreid = irq - OCTEON_IRQ_WDOG0; | ||
419 | u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); | 533 | u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); |
534 | struct irq_desc *desc = irq_to_desc(irq); | ||
420 | 535 | ||
421 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); | 536 | if ((desc->status & IRQ_DISABLED) == 0) { |
537 | index = coreid * 2 + 1; | ||
538 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | ||
539 | } | ||
422 | } | 540 | } |
423 | 541 | ||
424 | /* | 542 | /* |
425 | * Enable the irq on the current core for chips that have the EN*_W1{S,C} | 543 | * Disable the irq on the current core for chips that have the EN*_W1{S,C} |
426 | * registers. | 544 | * registers. |
427 | */ | 545 | */ |
428 | static void octeon_irq_ciu1_eoi_v2(unsigned int irq) | 546 | static void octeon_irq_ciu1_ack_v2(unsigned int irq) |
429 | { | 547 | { |
430 | struct irq_desc *desc = irq_desc + irq; | ||
431 | int index = cvmx_get_core_num() * 2 + 1; | 548 | int index = cvmx_get_core_num() * 2 + 1; |
432 | u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); | 549 | u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); |
433 | 550 | ||
434 | if ((desc->status & IRQ_DISABLED) == 0) | 551 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); |
435 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | ||
436 | } | 552 | } |
437 | 553 | ||
438 | /* | 554 | /* |
@@ -455,19 +571,30 @@ static int octeon_irq_ciu1_set_affinity(unsigned int irq, | |||
455 | const struct cpumask *dest) | 571 | const struct cpumask *dest) |
456 | { | 572 | { |
457 | int cpu; | 573 | int cpu; |
574 | struct irq_desc *desc = irq_to_desc(irq); | ||
575 | int enable_one = (desc->status & IRQ_DISABLED) == 0; | ||
458 | unsigned long flags; | 576 | unsigned long flags; |
459 | int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ | 577 | int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ |
460 | 578 | ||
579 | /* | ||
580 | * For non-v2 CIU, we will allow only single CPU affinity. | ||
581 | * This removes the need to do locking in the .ack/.eoi | ||
582 | * functions. | ||
583 | */ | ||
584 | if (cpumask_weight(dest) != 1) | ||
585 | return -EINVAL; | ||
586 | |||
461 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); | 587 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); |
462 | for_each_online_cpu(cpu) { | 588 | for_each_online_cpu(cpu) { |
463 | int coreid = octeon_coreid_for_cpu(cpu); | 589 | int coreid = octeon_coreid_for_cpu(cpu); |
464 | uint64_t en1 = | 590 | uint64_t en1 = |
465 | cvmx_read_csr(CVMX_CIU_INTX_EN1 | 591 | cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); |
466 | (coreid * 2 + 1)); | 592 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
467 | if (cpumask_test_cpu(cpu, dest)) | 593 | enable_one = 0; |
468 | en1 |= 1ull << bit; | 594 | en1 |= 1ull << bit; |
469 | else | 595 | } else { |
470 | en1 &= ~(1ull << bit); | 596 | en1 &= ~(1ull << bit); |
597 | } | ||
471 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1); | 598 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1); |
472 | } | 599 | } |
473 | /* | 600 | /* |
@@ -489,13 +616,17 @@ static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq, | |||
489 | { | 616 | { |
490 | int cpu; | 617 | int cpu; |
491 | int index; | 618 | int index; |
619 | struct irq_desc *desc = irq_to_desc(irq); | ||
620 | int enable_one = (desc->status & IRQ_DISABLED) == 0; | ||
492 | u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); | 621 | u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0); |
493 | for_each_online_cpu(cpu) { | 622 | for_each_online_cpu(cpu) { |
494 | index = octeon_coreid_for_cpu(cpu) * 2 + 1; | 623 | index = octeon_coreid_for_cpu(cpu) * 2 + 1; |
495 | if (cpumask_test_cpu(cpu, dest)) | 624 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
625 | enable_one = 0; | ||
496 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | 626 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); |
497 | else | 627 | } else { |
498 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); | 628 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); |
629 | } | ||
499 | } | 630 | } |
500 | return 0; | 631 | return 0; |
501 | } | 632 | } |
@@ -505,11 +636,10 @@ static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq, | |||
505 | * Newer octeon chips have support for lockless CIU operation. | 636 | * Newer octeon chips have support for lockless CIU operation. |
506 | */ | 637 | */ |
507 | static struct irq_chip octeon_irq_chip_ciu1_v2 = { | 638 | static struct irq_chip octeon_irq_chip_ciu1_v2 = { |
508 | .name = "CIU0", | 639 | .name = "CIU1", |
509 | .enable = octeon_irq_ciu1_enable_v2, | 640 | .enable = octeon_irq_ciu1_enable_v2, |
510 | .disable = octeon_irq_ciu1_disable_all_v2, | 641 | .disable = octeon_irq_ciu1_disable_all_v2, |
511 | .ack = octeon_irq_ciu1_ack_v2, | 642 | .eoi = octeon_irq_ciu1_enable_v2, |
512 | .eoi = octeon_irq_ciu1_eoi_v2, | ||
513 | #ifdef CONFIG_SMP | 643 | #ifdef CONFIG_SMP |
514 | .set_affinity = octeon_irq_ciu1_set_affinity_v2, | 644 | .set_affinity = octeon_irq_ciu1_set_affinity_v2, |
515 | #endif | 645 | #endif |
@@ -519,19 +649,36 @@ static struct irq_chip octeon_irq_chip_ciu1 = { | |||
519 | .name = "CIU1", | 649 | .name = "CIU1", |
520 | .enable = octeon_irq_ciu1_enable, | 650 | .enable = octeon_irq_ciu1_enable, |
521 | .disable = octeon_irq_ciu1_disable, | 651 | .disable = octeon_irq_ciu1_disable, |
522 | .ack = octeon_irq_ciu1_ack, | ||
523 | .eoi = octeon_irq_ciu1_eoi, | 652 | .eoi = octeon_irq_ciu1_eoi, |
524 | #ifdef CONFIG_SMP | 653 | #ifdef CONFIG_SMP |
525 | .set_affinity = octeon_irq_ciu1_set_affinity, | 654 | .set_affinity = octeon_irq_ciu1_set_affinity, |
526 | #endif | 655 | #endif |
527 | }; | 656 | }; |
528 | 657 | ||
658 | static struct irq_chip octeon_irq_chip_ciu1_wd_v2 = { | ||
659 | .name = "CIU1-W", | ||
660 | .enable = octeon_irq_ciu1_wd_enable_v2, | ||
661 | .disable = octeon_irq_ciu1_disable_all_v2, | ||
662 | .eoi = octeon_irq_ciu1_wd_enable_v2, | ||
663 | }; | ||
664 | |||
665 | static struct irq_chip octeon_irq_chip_ciu1_wd = { | ||
666 | .name = "CIU1-W", | ||
667 | .enable = octeon_irq_ciu1_wd_enable, | ||
668 | .disable = octeon_irq_ciu1_disable, | ||
669 | .eoi = octeon_irq_ciu1_eoi, | ||
670 | }; | ||
671 | |||
672 | static void (*octeon_ciu0_ack)(unsigned int); | ||
673 | static void (*octeon_ciu1_ack)(unsigned int); | ||
674 | |||
529 | void __init arch_init_irq(void) | 675 | void __init arch_init_irq(void) |
530 | { | 676 | { |
531 | int irq; | 677 | unsigned int irq; |
532 | struct irq_chip *chip0; | 678 | struct irq_chip *chip0; |
533 | struct irq_chip *chip0_timer; | 679 | struct irq_chip *chip0_mbox; |
534 | struct irq_chip *chip1; | 680 | struct irq_chip *chip1; |
681 | struct irq_chip *chip1_wd; | ||
535 | 682 | ||
536 | #ifdef CONFIG_SMP | 683 | #ifdef CONFIG_SMP |
537 | /* Set the default affinity to the boot cpu. */ | 684 | /* Set the default affinity to the boot cpu. */ |
@@ -545,13 +692,19 @@ void __init arch_init_irq(void) | |||
545 | if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) || | 692 | if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) || |
546 | OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || | 693 | OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || |
547 | OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) { | 694 | OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) { |
695 | octeon_ciu0_ack = octeon_irq_ciu0_ack_v2; | ||
696 | octeon_ciu1_ack = octeon_irq_ciu1_ack_v2; | ||
548 | chip0 = &octeon_irq_chip_ciu0_v2; | 697 | chip0 = &octeon_irq_chip_ciu0_v2; |
549 | chip0_timer = &octeon_irq_chip_ciu0_timer_v2; | 698 | chip0_mbox = &octeon_irq_chip_ciu0_mbox_v2; |
550 | chip1 = &octeon_irq_chip_ciu1_v2; | 699 | chip1 = &octeon_irq_chip_ciu1_v2; |
700 | chip1_wd = &octeon_irq_chip_ciu1_wd_v2; | ||
551 | } else { | 701 | } else { |
702 | octeon_ciu0_ack = octeon_irq_ciu0_ack; | ||
703 | octeon_ciu1_ack = octeon_irq_ciu1_ack; | ||
552 | chip0 = &octeon_irq_chip_ciu0; | 704 | chip0 = &octeon_irq_chip_ciu0; |
553 | chip0_timer = &octeon_irq_chip_ciu0_timer; | 705 | chip0_mbox = &octeon_irq_chip_ciu0_mbox; |
554 | chip1 = &octeon_irq_chip_ciu1; | 706 | chip1 = &octeon_irq_chip_ciu1; |
707 | chip1_wd = &octeon_irq_chip_ciu1_wd; | ||
555 | } | 708 | } |
556 | 709 | ||
557 | /* 0 - 15 reserved for i8259 master and slave controller. */ | 710 | /* 0 - 15 reserved for i8259 master and slave controller. */ |
@@ -565,26 +718,22 @@ void __init arch_init_irq(void) | |||
565 | /* 24 - 87 CIU_INT_SUM0 */ | 718 | /* 24 - 87 CIU_INT_SUM0 */ |
566 | for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) { | 719 | for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) { |
567 | switch (irq) { | 720 | switch (irq) { |
568 | case OCTEON_IRQ_GMX_DRP0: | 721 | case OCTEON_IRQ_MBOX0: |
569 | case OCTEON_IRQ_GMX_DRP1: | 722 | case OCTEON_IRQ_MBOX1: |
570 | case OCTEON_IRQ_IPD_DRP: | 723 | set_irq_chip_and_handler(irq, chip0_mbox, handle_percpu_irq); |
571 | case OCTEON_IRQ_KEY_ZERO: | ||
572 | case OCTEON_IRQ_TIMER0: | ||
573 | case OCTEON_IRQ_TIMER1: | ||
574 | case OCTEON_IRQ_TIMER2: | ||
575 | case OCTEON_IRQ_TIMER3: | ||
576 | set_irq_chip_and_handler(irq, chip0_timer, handle_percpu_irq); | ||
577 | break; | 724 | break; |
578 | default: | 725 | default: |
579 | set_irq_chip_and_handler(irq, chip0, handle_percpu_irq); | 726 | set_irq_chip_and_handler(irq, chip0, handle_fasteoi_irq); |
580 | break; | 727 | break; |
581 | } | 728 | } |
582 | } | 729 | } |
583 | 730 | ||
584 | /* 88 - 151 CIU_INT_SUM1 */ | 731 | /* 88 - 151 CIU_INT_SUM1 */ |
585 | for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) { | 732 | for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_WDOG15; irq++) |
586 | set_irq_chip_and_handler(irq, chip1, handle_percpu_irq); | 733 | set_irq_chip_and_handler(irq, chip1_wd, handle_fasteoi_irq); |
587 | } | 734 | |
735 | for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED151; irq++) | ||
736 | set_irq_chip_and_handler(irq, chip1, handle_fasteoi_irq); | ||
588 | 737 | ||
589 | set_c0_status(0x300 << 2); | 738 | set_c0_status(0x300 << 2); |
590 | } | 739 | } |
@@ -600,6 +749,7 @@ asmlinkage void plat_irq_dispatch(void) | |||
600 | unsigned long cop0_status; | 749 | unsigned long cop0_status; |
601 | uint64_t ciu_en; | 750 | uint64_t ciu_en; |
602 | uint64_t ciu_sum; | 751 | uint64_t ciu_sum; |
752 | unsigned int irq; | ||
603 | 753 | ||
604 | while (1) { | 754 | while (1) { |
605 | cop0_cause = read_c0_cause(); | 755 | cop0_cause = read_c0_cause(); |
@@ -611,18 +761,24 @@ asmlinkage void plat_irq_dispatch(void) | |||
611 | ciu_sum = cvmx_read_csr(ciu_sum0_address); | 761 | ciu_sum = cvmx_read_csr(ciu_sum0_address); |
612 | ciu_en = cvmx_read_csr(ciu_en0_address); | 762 | ciu_en = cvmx_read_csr(ciu_en0_address); |
613 | ciu_sum &= ciu_en; | 763 | ciu_sum &= ciu_en; |
614 | if (likely(ciu_sum)) | 764 | if (likely(ciu_sum)) { |
615 | do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1); | 765 | irq = fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1; |
616 | else | 766 | octeon_ciu0_ack(irq); |
767 | do_IRQ(irq); | ||
768 | } else { | ||
617 | spurious_interrupt(); | 769 | spurious_interrupt(); |
770 | } | ||
618 | } else if (unlikely(cop0_cause & STATUSF_IP3)) { | 771 | } else if (unlikely(cop0_cause & STATUSF_IP3)) { |
619 | ciu_sum = cvmx_read_csr(ciu_sum1_address); | 772 | ciu_sum = cvmx_read_csr(ciu_sum1_address); |
620 | ciu_en = cvmx_read_csr(ciu_en1_address); | 773 | ciu_en = cvmx_read_csr(ciu_en1_address); |
621 | ciu_sum &= ciu_en; | 774 | ciu_sum &= ciu_en; |
622 | if (likely(ciu_sum)) | 775 | if (likely(ciu_sum)) { |
623 | do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1); | 776 | irq = fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1; |
624 | else | 777 | octeon_ciu1_ack(irq); |
778 | do_IRQ(irq); | ||
779 | } else { | ||
625 | spurious_interrupt(); | 780 | spurious_interrupt(); |
781 | } | ||
626 | } else if (likely(cop0_cause)) { | 782 | } else if (likely(cop0_cause)) { |
627 | do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE); | 783 | do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE); |
628 | } else { | 784 | } else { |