Diffstat (limited to 'arch/mips/cavium-octeon/octeon-irq.c')
-rw-r--r--  arch/mips/cavium-octeon/octeon-irq.c | 224
1 file changed, 184 insertions(+), 40 deletions(-)
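This patch teaches the Octeon CIU interrupt code to use the EN*_W1S / EN*_W1C (write-one-to-set / write-one-to-clear) views of the interrupt enable registers on the chips that have them, which the arch_init_irq() hunk near the end checks for. A store to those registers touches only the bits set in the written mask, so the enable/disable paths no longer need a read-modify-write serialized by octeon_irq_ciu0_rwlock / octeon_irq_ciu1_rwlock. A minimal sketch of the difference, using the cvmx_* accessors that appear throughout the diff (index and mask are illustrative locals, not values taken from the patch):

	/* Classic path: read-modify-write of the whole 64-bit enable register,
	 * which must be serialized so concurrent updates of different bits do
	 * not lose each other's changes.
	 */
	uint64_t en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(index));
	en0 |= mask;				/* or en0 &= ~mask to disable */
	cvmx_write_csr(CVMX_CIU_INTX_EN0(index), en0);

	/* W1S/W1C path: one store per operation, only the masked bits change,
	 * so there is no read-modify-write window to protect and no lock.
	 */
	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);	/* set (enable) */
	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);	/* clear (disable) */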
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 384f1842bfb1..6f2acf09328d 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -17,6 +17,15 @@ DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
 DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
 DEFINE_SPINLOCK(octeon_irq_msi_lock);
 
+static int octeon_coreid_for_cpu(int cpu)
+{
+#ifdef CONFIG_SMP
+	return cpu_logical_map(cpu);
+#else
+	return cvmx_get_core_num();
+#endif
+}
+
 static void octeon_irq_core_ack(unsigned int irq)
 {
 	unsigned int bit = irq - OCTEON_IRQ_SW0;
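The new octeon_coreid_for_cpu() helper folds the SMP and uniprocessor cases into one place, which is what lets the later hunks drop their #ifdef CONFIG_SMP / #else branches. A before/after sketch of the pattern, distilled from the hunks that follow:

	/* Before: every per-CPU loop carried its own #ifdef. */
#ifdef CONFIG_SMP
	int coreid = cpu_logical_map(cpu);
#else
	int coreid = cvmx_get_core_num();
#endif

	/* After: the #ifdef lives in one helper and the callers stay unconditional. */
	int coreid = octeon_coreid_for_cpu(cpu);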
@@ -152,11 +161,10 @@ static void octeon_irq_ciu0_disable(unsigned int irq)
 	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
 	unsigned long flags;
 	uint64_t en0;
-#ifdef CONFIG_SMP
 	int cpu;
 	write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
 	for_each_online_cpu(cpu) {
-		int coreid = cpu_logical_map(cpu);
+		int coreid = octeon_coreid_for_cpu(cpu);
 		en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
 		en0 &= ~(1ull << bit);
 		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
@@ -167,26 +175,57 @@ static void octeon_irq_ciu0_disable(unsigned int irq)
 	 */
 	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
 	write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
-#else
-	int coreid = cvmx_get_core_num();
-	local_irq_save(flags);
-	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
-	en0 &= ~(1ull << bit);
-	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
-	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
-	local_irq_restore(flags);
-#endif
+}
+
+/*
+ * Enable the irq on the current core for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static void octeon_irq_ciu0_enable_v2(unsigned int irq)
+{
+	int index = cvmx_get_core_num() * 2;
+	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
+
+	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
+}
+
+/*
+ * Disable the irq on the current core for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static void octeon_irq_ciu0_disable_v2(unsigned int irq)
+{
+	int index = cvmx_get_core_num() * 2;
+	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
+
+	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
+}
+
+/*
+ * Disable the irq on all cores for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
+{
+	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
+	int index;
+	int cpu;
+	for_each_online_cpu(cpu) {
+		index = octeon_coreid_for_cpu(cpu) * 2;
+		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
+	}
 }
 
 #ifdef CONFIG_SMP
 static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
 {
 	int cpu;
+	unsigned long flags;
 	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
 
-	write_lock(&octeon_irq_ciu0_rwlock);
+	write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
 	for_each_online_cpu(cpu) {
-		int coreid = cpu_logical_map(cpu);
+		int coreid = octeon_coreid_for_cpu(cpu);
 		uint64_t en0 =
 			cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
 		if (cpumask_test_cpu(cpu, dest))
@@ -200,11 +239,45 @@ static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *
 	 * of them are done.
 	 */
 	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
-	write_unlock(&octeon_irq_ciu0_rwlock);
+	write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
 
 	return 0;
 }
+
+/*
+ * Set affinity for the irq for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
+					   const struct cpumask *dest)
+{
+	int cpu;
+	int index;
+	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
+	for_each_online_cpu(cpu) {
+		index = octeon_coreid_for_cpu(cpu) * 2;
+		if (cpumask_test_cpu(cpu, dest))
+			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
+		else
+			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
+	}
+	return 0;
+}
+#endif
+
+/*
+ * Newer octeon chips have support for lockless CIU operation.
+ */
+static struct irq_chip octeon_irq_chip_ciu0_v2 = {
+	.name = "CIU0",
+	.enable = octeon_irq_ciu0_enable_v2,
+	.disable = octeon_irq_ciu0_disable_all_v2,
+	.ack = octeon_irq_ciu0_disable_v2,
+	.eoi = octeon_irq_ciu0_enable_v2,
+#ifdef CONFIG_SMP
+	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
 #endif
+};
 
 static struct irq_chip octeon_irq_chip_ciu0 = {
 	.name = "CIU0",
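Mapping .ack to the per-core W1C disable and .eoi to the per-core W1S enable works because these interrupts use handle_percpu_irq as their flow handler (see the arch_init_irq() hunk below): the source is masked on the local core while its handler runs and unmasked again afterwards, one store each way. Roughly, as a simplified sketch of the generic IRQ flow of this kernel generation rather than a quote of it:

	void handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
	{
		if (desc->chip->ack)
			desc->chip->ack(irq);	/* e.g. octeon_irq_ciu0_disable_v2: W1C, this core only */

		handle_IRQ_event(irq, desc->action);

		if (desc->chip->eoi)
			desc->chip->eoi(irq);	/* e.g. octeon_irq_ciu0_enable_v2: W1S, this core only */
	}

The .disable callback, by contrast, is the slow path used when the interrupt is shut down, which is why octeon_irq_ciu0_disable_all_v2 clears the bit on every online core rather than just the local one.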
@@ -269,11 +342,10 @@ static void octeon_irq_ciu1_disable(unsigned int irq)
 	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
 	unsigned long flags;
 	uint64_t en1;
-#ifdef CONFIG_SMP
 	int cpu;
 	write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
 	for_each_online_cpu(cpu) {
-		int coreid = cpu_logical_map(cpu);
+		int coreid = octeon_coreid_for_cpu(cpu);
 		en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
 		en1 &= ~(1ull << bit);
 		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
@@ -284,26 +356,58 @@ static void octeon_irq_ciu1_disable(unsigned int irq)
 	 */
 	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
 	write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
-#else
-	int coreid = cvmx_get_core_num();
-	local_irq_save(flags);
-	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
-	en1 &= ~(1ull << bit);
-	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
-	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
-	local_irq_restore(flags);
-#endif
+}
+
+/*
+ * Enable the irq on the current core for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static void octeon_irq_ciu1_enable_v2(unsigned int irq)
+{
+	int index = cvmx_get_core_num() * 2 + 1;
+	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
+
+	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+}
+
+/*
+ * Disable the irq on the current core for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static void octeon_irq_ciu1_disable_v2(unsigned int irq)
+{
+	int index = cvmx_get_core_num() * 2 + 1;
+	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
+
+	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
+}
+
+/*
+ * Disable the irq on all cores for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
+{
+	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
+	int index;
+	int cpu;
+	for_each_online_cpu(cpu) {
+		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
+		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
+	}
 }
 
 #ifdef CONFIG_SMP
-static int octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask *dest)
+static int octeon_irq_ciu1_set_affinity(unsigned int irq,
+					const struct cpumask *dest)
 {
 	int cpu;
+	unsigned long flags;
 	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
 
-	write_lock(&octeon_irq_ciu1_rwlock);
+	write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
 	for_each_online_cpu(cpu) {
-		int coreid = cpu_logical_map(cpu);
+		int coreid = octeon_coreid_for_cpu(cpu);
 		uint64_t en1 =
 			cvmx_read_csr(CVMX_CIU_INTX_EN1
 				      (coreid * 2 + 1));
@@ -318,12 +422,46 @@ static int octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask *
 	 * of them are done.
 	 */
 	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
-	write_unlock(&octeon_irq_ciu1_rwlock);
+	write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
+
+	return 0;
+}
 
+/*
+ * Set affinity for the irq for chips that have the EN*_W1{S,C}
+ * registers.
+ */
+static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
+					   const struct cpumask *dest)
+{
+	int cpu;
+	int index;
+	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
+	for_each_online_cpu(cpu) {
+		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
+		if (cpumask_test_cpu(cpu, dest))
+			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+		else
+			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
+	}
 	return 0;
 }
 #endif
 
+/*
+ * Newer octeon chips have support for lockless CIU operation.
+ */
+static struct irq_chip octeon_irq_chip_ciu1_v2 = {
+	.name = "CIU1",
+	.enable = octeon_irq_ciu1_enable_v2,
+	.disable = octeon_irq_ciu1_disable_all_v2,
+	.ack = octeon_irq_ciu1_disable_v2,
+	.eoi = octeon_irq_ciu1_enable_v2,
+#ifdef CONFIG_SMP
+	.set_affinity = octeon_irq_ciu1_set_affinity_v2,
+#endif
+};
+
 static struct irq_chip octeon_irq_chip_ciu1 = {
 	.name = "CIU1",
 	.enable = octeon_irq_ciu1_enable,
@@ -420,6 +558,8 @@ static struct irq_chip octeon_irq_chip_msi = {
 void __init arch_init_irq(void)
 {
 	int irq;
+	struct irq_chip *chip0;
+	struct irq_chip *chip1;
 
 #ifdef CONFIG_SMP
 	/* Set the default affinity to the boot cpu. */
@@ -430,6 +570,16 @@ void __init arch_init_irq(void)
 	if (NR_IRQS < OCTEON_IRQ_LAST)
 		pr_err("octeon_irq_init: NR_IRQS is set too low\n");
 
+	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
+	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
+	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
+		chip0 = &octeon_irq_chip_ciu0_v2;
+		chip1 = &octeon_irq_chip_ciu1_v2;
+	} else {
+		chip0 = &octeon_irq_chip_ciu0;
+		chip1 = &octeon_irq_chip_ciu1;
+	}
+
 	/* 0 - 15 reserved for i8259 master and slave controller. */
 
 	/* 17 - 23 Mips internal */
@@ -440,14 +590,12 @@ void __init arch_init_irq(void)
 
 	/* 24 - 87 CIU_INT_SUM0 */
 	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
-		set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu0,
-					 handle_percpu_irq);
+		set_irq_chip_and_handler(irq, chip0, handle_percpu_irq);
 	}
 
 	/* 88 - 151 CIU_INT_SUM1 */
 	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
-		set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu1,
-					 handle_percpu_irq);
+		set_irq_chip_and_handler(irq, chip1, handle_percpu_irq);
 	}
 
 #ifdef CONFIG_PCI_MSI
@@ -505,14 +653,10 @@ asmlinkage void plat_irq_dispatch(void)
 #ifdef CONFIG_HOTPLUG_CPU
 static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
 {
 	unsigned int isset;
-#ifdef CONFIG_SMP
-	int coreid = cpu_logical_map(cpu);
-#else
-	int coreid = cvmx_get_core_num();
-#endif
+	int coreid = octeon_coreid_for_cpu(cpu);
 	int bit = (irq < OCTEON_IRQ_WDOG0) ?
 		irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;
 	if (irq < 64) {
 		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
 			(1ull << bit)) >> bit;