author      David Daney <ddaney@caviumnetworks.com>    2009-10-13 14:26:03 -0400
committer   Ralf Baechle <ralf@linux-mips.org>         2009-11-02 06:00:07 -0500
commit      cd847b7857b835f9730d6fc93c3f423fcacc50f7 (patch)
tree        530d31b2ea1c088f5cc6340baf7c2b6a7111e159 /arch/mips
parent      b6b74d5490c3ad88de503e0c5d44e4820b79b678 (diff)
MIPS: Octeon: Use lockless interrupt controller operations when possible.
Some newer Octeon chips have registers that allow lockless operation of
the interrupt controller. Take advantage of them.
Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
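
The mechanism behind the change, in brief: the old enable/disable paths do a locked read-modify-write of the full 64-bit CIU_INTX_EN0/EN1 registers (hence octeon_irq_ciu0_rwlock and octeon_irq_ciu1_rwlock), while the newer chips add W1S ("write 1 to set") and W1C ("write 1 to clear") aliases, so a single store of a one-bit mask updates just that enable bit and no lock is needed. The standalone C sketch below only models that contrast; the fake_* helpers, the mutex, and the atomic word are illustrative stand-ins, not Octeon or kernel APIs.

/*
 * Hypothetical, self-contained model of the two enable-bit update styles.
 * None of these names are real Octeon/kernel APIs; they only mimic the
 * semantics described in the commit message.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_en0;                      /* models CIU_INTX_EN0 */
static pthread_mutex_t fake_lock = PTHREAD_MUTEX_INITIALIZER;

static _Atomic uint64_t fake_en0_v2;           /* models the W1S/W1C-backed register */

/* Old style: take the lock, read-modify-write the whole word, unlock. */
static void enable_bit_locked(unsigned int bit)
{
        pthread_mutex_lock(&fake_lock);
        fake_en0 |= 1ull << bit;
        pthread_mutex_unlock(&fake_lock);
}

static void disable_bit_locked(unsigned int bit)
{
        pthread_mutex_lock(&fake_lock);
        fake_en0 &= ~(1ull << bit);
        pthread_mutex_unlock(&fake_lock);
}

/*
 * New style: a single store of a one-bit mask, no lock.  fake_w1s() and
 * fake_w1c() stand in for writes to the CIU_INTX_EN*_W1S/W1C aliases,
 * where the hardware sets or clears only the bits that are 1 in the mask.
 */
static void fake_w1s(uint64_t mask)
{
        atomic_fetch_or(&fake_en0_v2, mask);
}

static void fake_w1c(uint64_t mask)
{
        atomic_fetch_and(&fake_en0_v2, ~mask);
}

static void enable_bit_v2(unsigned int bit)
{
        fake_w1s(1ull << bit);
}

static void disable_bit_v2(unsigned int bit)
{
        fake_w1c(1ull << bit);
}

int main(void)
{
        enable_bit_locked(3);
        disable_bit_locked(3);

        enable_bit_v2(3);
        disable_bit_v2(3);

        printf("en0=%#llx en0_v2=%#llx\n",
               (unsigned long long)fake_en0,
               (unsigned long long)atomic_load(&fake_en0_v2));
        return 0;
}

Because the per-core _v2 paths collapse to one cvmx_write_csr() of a mask, the patch can also reuse them as the .ack and .eoi handlers of the new irq_chip structures.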
Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/cavium-octeon/octeon-irq.c | 214
1 files changed, 178 insertions, 36 deletions
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 0bda5c5db150..6f2acf09328d 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -17,6 +17,15 @@ DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
 DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
 DEFINE_SPINLOCK(octeon_irq_msi_lock);
 
+static int octeon_coreid_for_cpu(int cpu)
+{
+#ifdef CONFIG_SMP
+	return cpu_logical_map(cpu);
+#else
+	return cvmx_get_core_num();
+#endif
+}
+
 static void octeon_irq_core_ack(unsigned int irq)
 {
 	unsigned int bit = irq - OCTEON_IRQ_SW0;
@@ -152,11 +161,10 @@ static void octeon_irq_ciu0_disable(unsigned int irq)
 	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
 	unsigned long flags;
 	uint64_t en0;
-#ifdef CONFIG_SMP
 	int cpu;
 	write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
 	for_each_online_cpu(cpu) {
-		int coreid = cpu_logical_map(cpu);
+		int coreid = octeon_coreid_for_cpu(cpu);
 		en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
 		en0 &= ~(1ull << bit);
 		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
@@ -167,15 +175,45 @@ static void octeon_irq_ciu0_disable(unsigned int irq)
 	 */
 	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
 	write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
-#else
-	int coreid = cvmx_get_core_num();
-	local_irq_save(flags);
-	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
-	en0 &= ~(1ull << bit);
-	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
-	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
-	local_irq_restore(flags);
-#endif
+}
+
+/*
+ * Enable the irq on the current core for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static void octeon_irq_ciu0_enable_v2(unsigned int irq)
+{
+	int index = cvmx_get_core_num() * 2;
+	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
+
+	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
+}
+
+/*
+ * Disable the irq on the current core for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static void octeon_irq_ciu0_disable_v2(unsigned int irq)
+{
+	int index = cvmx_get_core_num() * 2;
+	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
+
+	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
+}
+
+/*
+ * Disable the irq on all cores for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
+{
+	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
+	int index;
+	int cpu;
+	for_each_online_cpu(cpu) {
+		index = octeon_coreid_for_cpu(cpu) * 2;
+		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
+	}
 }
 
 #ifdef CONFIG_SMP
@@ -187,7 +225,7 @@ static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *
 
 	write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
 	for_each_online_cpu(cpu) {
-		int coreid = cpu_logical_map(cpu);
+		int coreid = octeon_coreid_for_cpu(cpu);
 		uint64_t en0 =
 			cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
 		if (cpumask_test_cpu(cpu, dest))
@@ -205,8 +243,42 @@ static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *
 
 	return 0;
 }
+
+/*
+ * Set affinity for the irq for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
+					   const struct cpumask *dest)
+{
+	int cpu;
+	int index;
+	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
+	for_each_online_cpu(cpu) {
+		index = octeon_coreid_for_cpu(cpu) * 2;
+		if (cpumask_test_cpu(cpu, dest))
+			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
+		else
+			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
+	}
+	return 0;
+}
 #endif
 
+/*
+ * Newer octeon chips have support for lockless CIU operation.
+ */
+static struct irq_chip octeon_irq_chip_ciu0_v2 = {
+	.name = "CIU0",
+	.enable = octeon_irq_ciu0_enable_v2,
+	.disable = octeon_irq_ciu0_disable_all_v2,
+	.ack = octeon_irq_ciu0_disable_v2,
+	.eoi = octeon_irq_ciu0_enable_v2,
+#ifdef CONFIG_SMP
+	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
+#endif
+};
+
 static struct irq_chip octeon_irq_chip_ciu0 = {
 	.name = "CIU0",
 	.enable = octeon_irq_ciu0_enable,
@@ -270,11 +342,10 @@ static void octeon_irq_ciu1_disable(unsigned int irq)
 	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
 	unsigned long flags;
 	uint64_t en1;
-#ifdef CONFIG_SMP
 	int cpu;
 	write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
 	for_each_online_cpu(cpu) {
-		int coreid = cpu_logical_map(cpu);
+		int coreid = octeon_coreid_for_cpu(cpu);
 		en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
 		en1 &= ~(1ull << bit);
 		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
@@ -285,19 +356,50 @@ static void octeon_irq_ciu1_disable(unsigned int irq)
 	 */
 	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
 	write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
-#else
-	int coreid = cvmx_get_core_num();
-	local_irq_save(flags);
-	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
-	en1 &= ~(1ull << bit);
-	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
-	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
-	local_irq_restore(flags);
-#endif
+}
+
+/*
+ * Enable the irq on the current core for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static void octeon_irq_ciu1_enable_v2(unsigned int irq)
+{
+	int index = cvmx_get_core_num() * 2 + 1;
+	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
+
+	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+}
+
+/*
+ * Disable the irq on the current core for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static void octeon_irq_ciu1_disable_v2(unsigned int irq)
+{
+	int index = cvmx_get_core_num() * 2 + 1;
+	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
+
+	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
+}
+
+/*
+ * Disable the irq on all cores for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
+{
+	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
+	int index;
+	int cpu;
+	for_each_online_cpu(cpu) {
+		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
+		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
+	}
 }
 
 #ifdef CONFIG_SMP
-static int octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask *dest)
+static int octeon_irq_ciu1_set_affinity(unsigned int irq,
+					const struct cpumask *dest)
 {
 	int cpu;
 	unsigned long flags;
@@ -305,7 +407,7 @@ static int octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask *
 
 	write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
 	for_each_online_cpu(cpu) {
-		int coreid = cpu_logical_map(cpu);
+		int coreid = octeon_coreid_for_cpu(cpu);
 		uint64_t en1 =
 			cvmx_read_csr(CVMX_CIU_INTX_EN1
 				(coreid * 2 + 1));
@@ -324,8 +426,42 @@ static int octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask *
 
 	return 0;
 }
+
+/*
+ * Set affinity for the irq for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
+					   const struct cpumask *dest)
+{
+	int cpu;
+	int index;
+	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
+	for_each_online_cpu(cpu) {
+		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
+		if (cpumask_test_cpu(cpu, dest))
+			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+		else
+			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
+	}
+	return 0;
+}
 #endif
 
+/*
+ * Newer octeon chips have support for lockless CIU operation.
+ */
+static struct irq_chip octeon_irq_chip_ciu1_v2 = {
+	.name = "CIU0",
+	.enable = octeon_irq_ciu1_enable_v2,
+	.disable = octeon_irq_ciu1_disable_all_v2,
+	.ack = octeon_irq_ciu1_disable_v2,
+	.eoi = octeon_irq_ciu1_enable_v2,
+#ifdef CONFIG_SMP
+	.set_affinity = octeon_irq_ciu1_set_affinity_v2,
+#endif
+};
+
 static struct irq_chip octeon_irq_chip_ciu1 = {
 	.name = "CIU1",
 	.enable = octeon_irq_ciu1_enable,
@@ -422,6 +558,8 @@ static struct irq_chip octeon_irq_chip_msi = {
 void __init arch_init_irq(void)
 {
 	int irq;
+	struct irq_chip *chip0;
+	struct irq_chip *chip1;
 
 #ifdef CONFIG_SMP
 	/* Set the default affinity to the boot cpu. */
@@ -432,6 +570,16 @@ void __init arch_init_irq(void)
 	if (NR_IRQS < OCTEON_IRQ_LAST)
 		pr_err("octeon_irq_init: NR_IRQS is set too low\n");
 
+	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
+	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
+	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
+		chip0 = &octeon_irq_chip_ciu0_v2;
+		chip1 = &octeon_irq_chip_ciu1_v2;
+	} else {
+		chip0 = &octeon_irq_chip_ciu0;
+		chip1 = &octeon_irq_chip_ciu1;
+	}
+
 	/* 0 - 15 reserved for i8259 master and slave controller. */
 
 	/* 17 - 23 Mips internal */
@@ -442,14 +590,12 @@ void __init arch_init_irq(void)
 
 	/* 24 - 87 CIU_INT_SUM0 */
 	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
-		set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu0,
-					 handle_percpu_irq);
+		set_irq_chip_and_handler(irq, chip0, handle_percpu_irq);
 	}
 
 	/* 88 - 151 CIU_INT_SUM1 */
 	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
-		set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu1,
-					 handle_percpu_irq);
+		set_irq_chip_and_handler(irq, chip1, handle_percpu_irq);
 	}
 
 #ifdef CONFIG_PCI_MSI
@@ -507,14 +653,10 @@ asmlinkage void plat_irq_dispatch(void)
 #ifdef CONFIG_HOTPLUG_CPU
 static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
 {
 	unsigned int isset;
-#ifdef CONFIG_SMP
-	int coreid = cpu_logical_map(cpu);
-#else
-	int coreid = cvmx_get_core_num();
-#endif
+	int coreid = octeon_coreid_for_cpu(cpu);
 	int bit = (irq < OCTEON_IRQ_WDOG0) ?
 		irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;
 	if (irq < 64) {
 		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
 			(1ull << bit)) >> bit;