diff options
| author | David Daney <ddaney@caviumnetworks.com> | 2010-02-18 14:47:40 -0500 |
|---|---|---|
| committer | Ralf Baechle <ralf@linux-mips.org> | 2010-02-27 06:53:40 -0500 |
| commit | 399614226cfa45ffaba45b269e8af9ddc26de537 (patch) | |
| tree | 286fda77dd50b1246c61fba69debb485decbd613 | |
| parent | 4837a661a52dd9e02cd1cdb08a7ebdc5ed028ee4 (diff) | |
MIPS: Octeon: Replace rwlocks in irq_chip handlers with raw_spinlocks.
Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Cc: linux-mips@linux-mips.org
Patchwork: http://patchwork.linux-mips.org/patch/972/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
| -rw-r--r-- | arch/mips/cavium-octeon/octeon-irq.c | 42 |
1 file changed, 14 insertions(+), 28 deletions(-)
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index bc985b7af4d5..c424cd158dc6 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c | |||
| @@ -13,8 +13,8 @@ | |||
| 13 | #include <asm/octeon/cvmx-pexp-defs.h> | 13 | #include <asm/octeon/cvmx-pexp-defs.h> |
| 14 | #include <asm/octeon/cvmx-npi-defs.h> | 14 | #include <asm/octeon/cvmx-npi-defs.h> |
| 15 | 15 | ||
| 16 | DEFINE_RWLOCK(octeon_irq_ciu0_rwlock); | 16 | static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock); |
| 17 | DEFINE_RWLOCK(octeon_irq_ciu1_rwlock); | 17 | static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock); |
| 18 | 18 | ||
| 19 | static int octeon_coreid_for_cpu(int cpu) | 19 | static int octeon_coreid_for_cpu(int cpu) |
| 20 | { | 20 | { |
| @@ -137,19 +137,12 @@ static void octeon_irq_ciu0_enable(unsigned int irq) | |||
| 137 | uint64_t en0; | 137 | uint64_t en0; |
| 138 | int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */ | 138 | int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */ |
| 139 | 139 | ||
| 140 | /* | 140 | raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); |
| 141 | * A read lock is used here to make sure only one core is ever | ||
| 142 | * updating the CIU enable bits at a time. During an enable | ||
| 143 | * the cores don't interfere with each other. During a disable | ||
| 144 | * the write lock stops any enables that might cause a | ||
| 145 | * problem. | ||
| 146 | */ | ||
| 147 | read_lock_irqsave(&octeon_irq_ciu0_rwlock, flags); | ||
| 148 | en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); | 141 | en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); |
| 149 | en0 |= 1ull << bit; | 142 | en0 |= 1ull << bit; |
| 150 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0); | 143 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0); |
| 151 | cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); | 144 | cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); |
| 152 | read_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags); | 145 | raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); |
| 153 | } | 146 | } |
| 154 | 147 | ||
| 155 | static void octeon_irq_ciu0_disable(unsigned int irq) | 148 | static void octeon_irq_ciu0_disable(unsigned int irq) |
| @@ -158,7 +151,7 @@ static void octeon_irq_ciu0_disable(unsigned int irq) | |||
| 158 | unsigned long flags; | 151 | unsigned long flags; |
| 159 | uint64_t en0; | 152 | uint64_t en0; |
| 160 | int cpu; | 153 | int cpu; |
| 161 | write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags); | 154 | raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); |
| 162 | for_each_online_cpu(cpu) { | 155 | for_each_online_cpu(cpu) { |
| 163 | int coreid = octeon_coreid_for_cpu(cpu); | 156 | int coreid = octeon_coreid_for_cpu(cpu); |
| 164 | en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); | 157 | en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)); |
| @@ -170,7 +163,7 @@ static void octeon_irq_ciu0_disable(unsigned int irq) | |||
| 170 | * of them are done. | 163 | * of them are done. |
| 171 | */ | 164 | */ |
| 172 | cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2)); | 165 | cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2)); |
| 173 | write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags); | 166 | raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); |
| 174 | } | 167 | } |
| 175 | 168 | ||
| 176 | /* | 169 | /* |
| @@ -256,7 +249,7 @@ static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask * | |||
| 256 | unsigned long flags; | 249 | unsigned long flags; |
| 257 | int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */ | 250 | int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */ |
| 258 | 251 | ||
| 259 | write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags); | 252 | raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags); |
| 260 | for_each_online_cpu(cpu) { | 253 | for_each_online_cpu(cpu) { |
| 261 | int coreid = octeon_coreid_for_cpu(cpu); | 254 | int coreid = octeon_coreid_for_cpu(cpu); |
| 262 | uint64_t en0 = | 255 | uint64_t en0 = |
| @@ -272,7 +265,7 @@ static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask * | |||
| 272 | * of them are done. | 265 | * of them are done. |
| 273 | */ | 266 | */ |
| 274 | cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2)); | 267 | cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2)); |
| 275 | write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags); | 268 | raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags); |
| 276 | 269 | ||
| 277 | return 0; | 270 | return 0; |
| 278 | } | 271 | } |
| @@ -377,19 +370,12 @@ static void octeon_irq_ciu1_enable(unsigned int irq) | |||
| 377 | uint64_t en1; | 370 | uint64_t en1; |
| 378 | int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ | 371 | int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ |
| 379 | 372 | ||
| 380 | /* | 373 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); |
| 381 | * A read lock is used here to make sure only one core is ever | ||
| 382 | * updating the CIU enable bits at a time. During an enable | ||
| 383 | * the cores don't interfere with each other. During a disable | ||
| 384 | * the write lock stops any enables that might cause a | ||
| 385 | * problem. | ||
| 386 | */ | ||
| 387 | read_lock_irqsave(&octeon_irq_ciu1_rwlock, flags); | ||
| 388 | en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); | 374 | en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); |
| 389 | en1 |= 1ull << bit; | 375 | en1 |= 1ull << bit; |
| 390 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1); | 376 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1); |
| 391 | cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); | 377 | cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); |
| 392 | read_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags); | 378 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); |
| 393 | } | 379 | } |
| 394 | 380 | ||
| 395 | static void octeon_irq_ciu1_disable(unsigned int irq) | 381 | static void octeon_irq_ciu1_disable(unsigned int irq) |
| @@ -398,7 +384,7 @@ static void octeon_irq_ciu1_disable(unsigned int irq) | |||
| 398 | unsigned long flags; | 384 | unsigned long flags; |
| 399 | uint64_t en1; | 385 | uint64_t en1; |
| 400 | int cpu; | 386 | int cpu; |
| 401 | write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags); | 387 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); |
| 402 | for_each_online_cpu(cpu) { | 388 | for_each_online_cpu(cpu) { |
| 403 | int coreid = octeon_coreid_for_cpu(cpu); | 389 | int coreid = octeon_coreid_for_cpu(cpu); |
| 404 | en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); | 390 | en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)); |
| @@ -410,7 +396,7 @@ static void octeon_irq_ciu1_disable(unsigned int irq) | |||
| 410 | * of them are done. | 396 | * of them are done. |
| 411 | */ | 397 | */ |
| 412 | cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1)); | 398 | cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1)); |
| 413 | write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags); | 399 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); |
| 414 | } | 400 | } |
| 415 | 401 | ||
| 416 | /* | 402 | /* |
| @@ -474,7 +460,7 @@ static int octeon_irq_ciu1_set_affinity(unsigned int irq, | |||
| 474 | unsigned long flags; | 460 | unsigned long flags; |
| 475 | int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ | 461 | int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */ |
| 476 | 462 | ||
| 477 | write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags); | 463 | raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags); |
| 478 | for_each_online_cpu(cpu) { | 464 | for_each_online_cpu(cpu) { |
| 479 | int coreid = octeon_coreid_for_cpu(cpu); | 465 | int coreid = octeon_coreid_for_cpu(cpu); |
| 480 | uint64_t en1 = | 466 | uint64_t en1 = |
| @@ -491,7 +477,7 @@ static int octeon_irq_ciu1_set_affinity(unsigned int irq, | |||
| 491 | * of them are done. | 477 | * of them are done. |
| 492 | */ | 478 | */ |
| 493 | cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1)); | 479 | cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1)); |
| 494 | write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags); | 480 | raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags); |
| 495 | 481 | ||
| 496 | return 0; | 482 | return 0; |
| 497 | } | 483 | } |
