author    David Daney <david.daney@cavium.com>  2012-04-05 13:24:25 -0400
committer David Daney <david.daney@cavium.com>  2012-08-31 13:46:54 -0400
commit    1a7e68f2c7f1e3bd6c49df031ec0eca947c35b2d (patch)
tree      2576f5805f29307b199d11928d8f48bb3f4752ba /arch/mips/cavium-octeon
parent    88fd85892a55730878fc081eee62553eb18f1b9c (diff)
MIPS: Octeon: Make interrupt controller work with threaded handlers.
For CIUv1 controllers, we were relying on all calls to the irq_chip functions being made from the CPU that received the irq, and on all of them being made from interrupt context. These assumptions do not hold for threaded handlers. Make all the masking operations actually mask the irq source, and use real raw_spin_locks instead of manually twiddling the Status[IE] bit.

Signed-off-by: David Daney <david.daney@cavium.com>
Diffstat (limited to 'arch/mips/cavium-octeon')
-rw-r--r--	arch/mips/cavium-octeon/octeon-irq.c	280
1 file changed, 137 insertions(+), 143 deletions(-)
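Before reading the diff, the shape of the change is easy to see in isolation. The sketch below is an illustration only, distilled from the hunks that follow (the helper name ciu_enable_bit_on_cpu is made up for the example; the other names follow octeon-irq.c): every CIUv1 enable/mask path now takes a per-CPU raw spinlock, updates the per-CPU enable-mask mirror with a non-atomic bitop, and publishes the mirror with wmb() before writing the CIU enable register, so octeon_irq_ip{2,3}_ciu() sees a consistent mirror regardless of which CPU or context ran the irq_chip callback.

/*
 * Illustrative sketch only -- not part of the patch. It mirrors the
 * locking pattern the hunks below introduce, for the line-0 (EN0) case.
 */
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);

/* Hypothetical helper: enable one CIU source for a given cpu/coreid. */
static void ciu_enable_bit_on_cpu(int cpu, int coreid, int bit)
{
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
	unsigned long *pen;
	unsigned long flags;

	raw_spin_lock_irqsave(lock, flags);
	pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
	__set_bit(bit, pen);	/* non-atomic: serialized by the per-CPU lock */
	wmb();			/* mirror must be visible before the irq can fire */
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
	raw_spin_unlock_irqrestore(lock, flags);
}

The same pattern, with __clear_bit() for masking and the EN1 register for line 1, repeats throughout the diff; the per-CPU lock replaces the old global octeon_irq_ciu0_lock/octeon_irq_ciu1_lock pair.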
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index aba085b2c0d5..fadec885916a 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -18,11 +18,9 @@
 #include <asm/octeon/octeon.h>
 #include <asm/octeon/cvmx-ciu2-defs.h>
 
-static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock);
-static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock);
-
 static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
 static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
+static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);
 
 static __read_mostly u8 octeon_irq_ciu_to_irq[8][64];
 
@@ -234,22 +232,31 @@ static void octeon_irq_ciu_enable(struct irq_data *data)
 	unsigned long *pen;
 	unsigned long flags;
 	union octeon_ciu_chip_data cd;
+	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
 
 	cd.p = irq_data_get_irq_chip_data(data);
 
+	raw_spin_lock_irqsave(lock, flags);
 	if (cd.s.line == 0) {
-		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
 		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
-		set_bit(cd.s.bit, pen);
+		__set_bit(cd.s.bit, pen);
+		/*
+		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
+		 * enabling the irq.
+		 */
+		wmb();
 		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
-		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
 	} else {
-		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
 		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
-		set_bit(cd.s.bit, pen);
+		__set_bit(cd.s.bit, pen);
+		/*
+		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
+		 * enabling the irq.
+		 */
+		wmb();
 		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
-		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
 	}
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 
 static void octeon_irq_ciu_enable_local(struct irq_data *data)
@@ -257,22 +264,31 @@ static void octeon_irq_ciu_enable_local(struct irq_data *data)
 	unsigned long *pen;
 	unsigned long flags;
 	union octeon_ciu_chip_data cd;
+	raw_spinlock_t *lock = &__get_cpu_var(octeon_irq_ciu_spinlock);
 
 	cd.p = irq_data_get_irq_chip_data(data);
 
+	raw_spin_lock_irqsave(lock, flags);
 	if (cd.s.line == 0) {
-		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
 		pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror);
-		set_bit(cd.s.bit, pen);
+		__set_bit(cd.s.bit, pen);
+		/*
+		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
+		 * enabling the irq.
+		 */
+		wmb();
 		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
-		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
 	} else {
-		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
 		pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror);
-		set_bit(cd.s.bit, pen);
+		__set_bit(cd.s.bit, pen);
+		/*
+		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
+		 * enabling the irq.
+		 */
+		wmb();
 		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
-		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
 	}
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 
 static void octeon_irq_ciu_disable_local(struct irq_data *data)
@@ -280,22 +296,31 @@ static void octeon_irq_ciu_disable_local(struct irq_data *data)
 	unsigned long *pen;
 	unsigned long flags;
 	union octeon_ciu_chip_data cd;
+	raw_spinlock_t *lock = &__get_cpu_var(octeon_irq_ciu_spinlock);
 
 	cd.p = irq_data_get_irq_chip_data(data);
 
+	raw_spin_lock_irqsave(lock, flags);
 	if (cd.s.line == 0) {
-		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
 		pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror);
-		clear_bit(cd.s.bit, pen);
+		__clear_bit(cd.s.bit, pen);
+		/*
+		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
+		 * enabling the irq.
+		 */
+		wmb();
 		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
-		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
 	} else {
-		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
 		pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror);
-		clear_bit(cd.s.bit, pen);
+		__clear_bit(cd.s.bit, pen);
+		/*
+		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
+		 * enabling the irq.
+		 */
+		wmb();
 		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
-		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
 	}
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 
 static void octeon_irq_ciu_disable_all(struct irq_data *data)
@@ -304,29 +329,30 @@ static void octeon_irq_ciu_disable_all(struct irq_data *data)
 	unsigned long *pen;
 	int cpu;
 	union octeon_ciu_chip_data cd;
-
-	wmb(); /* Make sure flag changes arrive before register updates. */
+	raw_spinlock_t *lock;
 
 	cd.p = irq_data_get_irq_chip_data(data);
 
-	if (cd.s.line == 0) {
-		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
-		for_each_online_cpu(cpu) {
-			int coreid = octeon_coreid_for_cpu(cpu);
+	for_each_online_cpu(cpu) {
+		int coreid = octeon_coreid_for_cpu(cpu);
+		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
+		if (cd.s.line == 0)
 			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
-			clear_bit(cd.s.bit, pen);
-			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
-		}
-		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
-	} else {
-		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
-		for_each_online_cpu(cpu) {
-			int coreid = octeon_coreid_for_cpu(cpu);
+		else
 			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
-			clear_bit(cd.s.bit, pen);
+
+		raw_spin_lock_irqsave(lock, flags);
+		__clear_bit(cd.s.bit, pen);
+		/*
+		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
+		 * enabling the irq.
+		 */
+		wmb();
+		if (cd.s.line == 0)
+			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
+		else
 			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
-		}
-		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
+		raw_spin_unlock_irqrestore(lock, flags);
 	}
 }
 
@@ -336,27 +362,30 @@ static void octeon_irq_ciu_enable_all(struct irq_data *data)
 	unsigned long *pen;
 	int cpu;
 	union octeon_ciu_chip_data cd;
+	raw_spinlock_t *lock;
 
 	cd.p = irq_data_get_irq_chip_data(data);
 
-	if (cd.s.line == 0) {
-		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
-		for_each_online_cpu(cpu) {
-			int coreid = octeon_coreid_for_cpu(cpu);
+	for_each_online_cpu(cpu) {
+		int coreid = octeon_coreid_for_cpu(cpu);
+		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
+		if (cd.s.line == 0)
 			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
-			set_bit(cd.s.bit, pen);
-			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
-		}
-		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
-	} else {
-		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
-		for_each_online_cpu(cpu) {
-			int coreid = octeon_coreid_for_cpu(cpu);
+		else
 			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
-			set_bit(cd.s.bit, pen);
+
+		raw_spin_lock_irqsave(lock, flags);
+		__set_bit(cd.s.bit, pen);
+		/*
+		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
+		 * enabling the irq.
+		 */
+		wmb();
+		if (cd.s.line == 0)
+			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
+		else
 			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
-		}
-		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
+		raw_spin_unlock_irqrestore(lock, flags);
 	}
 }
 
@@ -459,8 +488,6 @@ static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
 	u64 mask;
 	union octeon_ciu_chip_data cd;
 
-	wmb(); /* Make sure flag changes arrive before register updates. */
-
 	cd.p = irq_data_get_irq_chip_data(data);
 	mask = 1ull << (cd.s.bit);
 
@@ -618,6 +645,8 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data,
 	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
 	unsigned long flags;
 	union octeon_ciu_chip_data cd;
+	unsigned long *pen;
+	raw_spinlock_t *lock;
 
 	cd.p = irq_data_get_irq_chip_data(data);
 
@@ -632,36 +661,36 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data,
 	if (!enable_one)
 		return 0;
 
-	if (cd.s.line == 0) {
-		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
-		for_each_online_cpu(cpu) {
-			int coreid = octeon_coreid_for_cpu(cpu);
-			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
-
-			if (cpumask_test_cpu(cpu, dest) && enable_one) {
-				enable_one = false;
-				set_bit(cd.s.bit, pen);
-			} else {
-				clear_bit(cd.s.bit, pen);
-			}
-			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
-		}
-		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
-	} else {
-		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
-		for_each_online_cpu(cpu) {
-			int coreid = octeon_coreid_for_cpu(cpu);
-			unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
-
-			if (cpumask_test_cpu(cpu, dest) && enable_one) {
-				enable_one = false;
-				set_bit(cd.s.bit, pen);
-			} else {
-				clear_bit(cd.s.bit, pen);
-			}
-			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
-		}
-		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
-	}
+
+	for_each_online_cpu(cpu) {
+		int coreid = octeon_coreid_for_cpu(cpu);
+
+		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
+		raw_spin_lock_irqsave(lock, flags);
+
+		if (cd.s.line == 0)
+			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
+		else
+			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
+
+		if (cpumask_test_cpu(cpu, dest) && enable_one) {
+			enable_one = 0;
+			__set_bit(cd.s.bit, pen);
+		} else {
+			__clear_bit(cd.s.bit, pen);
+		}
+		/*
+		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
+		 * enabling the irq.
+		 */
+		wmb();
+
+		if (cd.s.line == 0)
+			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
+		else
+			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
+
+		raw_spin_unlock_irqrestore(lock, flags);
+	}
 	return 0;
 }
@@ -717,14 +746,6 @@ static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
 #endif
 
 /*
- * The v1 CIU code already masks things, so supply a dummy version to
- * the core chip code.
- */
-static void octeon_irq_dummy_mask(struct irq_data *data)
-{
-}
-
-/*
  * Newer octeon chips have support for lockless CIU operation.
  */
 static struct irq_chip octeon_irq_chip_ciu_v2 = {
@@ -745,7 +766,8 @@ static struct irq_chip octeon_irq_chip_ciu = {
 	.irq_enable = octeon_irq_ciu_enable,
 	.irq_disable = octeon_irq_ciu_disable_all,
 	.irq_ack = octeon_irq_ciu_ack,
-	.irq_mask = octeon_irq_dummy_mask,
+	.irq_mask = octeon_irq_ciu_disable_local,
+	.irq_unmask = octeon_irq_ciu_enable,
 #ifdef CONFIG_SMP
 	.irq_set_affinity = octeon_irq_ciu_set_affinity,
 	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
@@ -769,6 +791,8 @@ static struct irq_chip octeon_irq_chip_ciu_mbox = {
 	.name = "CIU-M",
 	.irq_enable = octeon_irq_ciu_enable_all,
 	.irq_disable = octeon_irq_ciu_disable_all,
+	.irq_ack = octeon_irq_ciu_disable_local,
+	.irq_eoi = octeon_irq_ciu_enable_local,
 
 	.irq_cpu_online = octeon_irq_ciu_enable_local,
 	.irq_cpu_offline = octeon_irq_ciu_disable_local,
@@ -793,7 +817,8 @@ static struct irq_chip octeon_irq_chip_ciu_gpio = {
 	.name = "CIU-GPIO",
 	.irq_enable = octeon_irq_ciu_enable_gpio,
 	.irq_disable = octeon_irq_ciu_disable_gpio,
-	.irq_mask = octeon_irq_dummy_mask,
+	.irq_mask = octeon_irq_ciu_disable_local,
+	.irq_unmask = octeon_irq_ciu_enable,
 	.irq_ack = octeon_irq_ciu_gpio_ack,
 	.irq_set_type = octeon_irq_ciu_gpio_set_type,
 #ifdef CONFIG_SMP
@@ -812,12 +837,18 @@ static void octeon_irq_ciu_wd_enable(struct irq_data *data)
 	unsigned long *pen;
 	int coreid = data->irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
 	int cpu = octeon_cpu_for_coreid(coreid);
+	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
 
-	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
+	raw_spin_lock_irqsave(lock, flags);
 	pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
-	set_bit(coreid, pen);
+	__set_bit(coreid, pen);
+	/*
+	 * Must be visible to octeon_irq_ip{2,3}_ciu() before enabling
+	 * the irq.
+	 */
+	wmb();
 	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
-	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
+	raw_spin_unlock_irqrestore(lock, flags);
 }
 
 /*
@@ -846,7 +877,8 @@ static struct irq_chip octeon_irq_chip_ciu_wd = {
 	.name = "CIU-W",
 	.irq_enable = octeon_irq_ciu_wd_enable,
 	.irq_disable = octeon_irq_ciu_disable_all,
-	.irq_mask = octeon_irq_dummy_mask,
+	.irq_mask = octeon_irq_ciu_disable_local,
+	.irq_unmask = octeon_irq_ciu_enable_local,
 };
 
 static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit)
@@ -1027,27 +1059,7 @@ static struct irq_domain_ops octeon_irq_domain_gpio_ops = {
 	.xlate = octeon_irq_gpio_xlat,
 };
 
-static void octeon_irq_ip2_v1(void)
-{
-	const unsigned long core_id = cvmx_get_core_num();
-	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));
-
-	ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror);
-	clear_c0_status(STATUSF_IP2);
-	if (likely(ciu_sum)) {
-		int bit = fls64(ciu_sum) - 1;
-		int irq = octeon_irq_ciu_to_irq[0][bit];
-		if (likely(irq))
-			do_IRQ(irq);
-		else
-			spurious_interrupt();
-	} else {
-		spurious_interrupt();
-	}
-	set_c0_status(STATUSF_IP2);
-}
-
-static void octeon_irq_ip2_v2(void)
+static void octeon_irq_ip2_ciu(void)
 {
 	const unsigned long core_id = cvmx_get_core_num();
 	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));
@@ -1064,26 +1076,8 @@ static void octeon_irq_ip2_v2(void)
 		spurious_interrupt();
 	}
 }
-static void octeon_irq_ip3_v1(void)
-{
-	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);
 
-	ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror);
-	clear_c0_status(STATUSF_IP3);
-	if (likely(ciu_sum)) {
-		int bit = fls64(ciu_sum) - 1;
-		int irq = octeon_irq_ciu_to_irq[1][bit];
-		if (likely(irq))
-			do_IRQ(irq);
-		else
-			spurious_interrupt();
-	} else {
-		spurious_interrupt();
-	}
-	set_c0_status(STATUSF_IP3);
-}
-
-static void octeon_irq_ip3_v2(void)
+static void octeon_irq_ip3_ciu(void)
 {
 	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);
 
@@ -1134,6 +1128,12 @@ static void __cpuinit octeon_irq_percpu_enable(void)
 static void __cpuinit octeon_irq_init_ciu_percpu(void)
 {
 	int coreid = cvmx_get_core_num();
+
+
+	__get_cpu_var(octeon_irq_ciu0_en_mirror) = 0;
+	__get_cpu_var(octeon_irq_ciu1_en_mirror) = 0;
+	wmb();
+	raw_spin_lock_init(&__get_cpu_var(octeon_irq_ciu_spinlock));
 	/*
 	 * Disable All CIU Interrupts. The ones we need will be
 	 * enabled later. Read the SUM register so we know the write
@@ -1170,10 +1170,6 @@ static void octeon_irq_init_ciu2_percpu(void)
 
 static void __cpuinit octeon_irq_setup_secondary_ciu(void)
 {
-
-	__get_cpu_var(octeon_irq_ciu0_en_mirror) = 0;
-	__get_cpu_var(octeon_irq_ciu1_en_mirror) = 0;
-
 	octeon_irq_init_ciu_percpu();
 	octeon_irq_percpu_enable();
 
@@ -1208,19 +1204,17 @@ static void __init octeon_irq_init_ciu(void)
 	octeon_irq_init_ciu_percpu();
 	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;
 
+	octeon_irq_ip2 = octeon_irq_ip2_ciu;
+	octeon_irq_ip3 = octeon_irq_ip3_ciu;
 	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
 	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
 	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
 	    OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
-		octeon_irq_ip2 = octeon_irq_ip2_v2;
-		octeon_irq_ip3 = octeon_irq_ip3_v2;
 		chip = &octeon_irq_chip_ciu_v2;
 		chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
 		chip_wd = &octeon_irq_chip_ciu_wd_v2;
 		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
 	} else {
-		octeon_irq_ip2 = octeon_irq_ip2_v1;
-		octeon_irq_ip3 = octeon_irq_ip3_v1;
 		chip = &octeon_irq_chip_ciu;
 		chip_mbox = &octeon_irq_chip_ciu_mbox;
 		chip_wd = &octeon_irq_chip_ciu_wd;