author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-12-12 12:52:12 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-12-12 12:52:12 -0500
commit		da8cadb31b82c9d41fc593c8deab6aa20b162d6b (patch)
tree		5640a34aa485f254de503a17833046645ade47f6 /arch/sparc64/kernel/smp.c
parent		02ec96be2b45d9f2712687ad107038ef390b24c2 (diff)
parent		0de56d1ab83323d604d95ca193dcbd28388dbabb (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6:
  [SPARC64]: Fix endless loop in cheetah_xcall_deliver().
  [SERIAL] sparc: Infrastructure to fix section mismatch bugs.
Diffstat (limited to 'arch/sparc64/kernel/smp.c')
-rw-r--r--	arch/sparc64/kernel/smp.c	19
1 file changed, 13 insertions, 6 deletions
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 894b506f9636..c39944927f1a 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -476,7 +476,7 @@ static inline void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpuma
  */
 static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
 {
-	u64 pstate, ver;
+	u64 pstate, ver, busy_mask;
 	int nack_busy_id, is_jbus, need_more;
 
 	if (cpus_empty(mask))
@@ -508,14 +508,20 @@ retry:
 			     "i" (ASI_INTR_W));
 
 	nack_busy_id = 0;
+	busy_mask = 0;
 	{
 		int i;
 
 		for_each_cpu_mask(i, mask) {
 			u64 target = (i << 14) | 0x70;
 
-			if (!is_jbus)
+			if (is_jbus) {
+				busy_mask |= (0x1UL << (i * 2));
+			} else {
 				target |= (nack_busy_id << 24);
+				busy_mask |= (0x1UL <<
+					      (nack_busy_id * 2));
+			}
 			__asm__ __volatile__(
 				"stxa	%%g0, [%0] %1\n\t"
 				"membar	#Sync\n\t"
@@ -531,15 +537,16 @@ retry:
 
 	/* Now, poll for completion. */
 	{
-		u64 dispatch_stat;
+		u64 dispatch_stat, nack_mask;
 		long stuck;
 
 		stuck = 100000 * nack_busy_id;
+		nack_mask = busy_mask << 1;
 		do {
 			__asm__ __volatile__("ldxa	[%%g0] %1, %0"
 					     : "=r" (dispatch_stat)
 					     : "i" (ASI_INTR_DISPATCH_STAT));
-			if (dispatch_stat == 0UL) {
+			if (!(dispatch_stat & (busy_mask | nack_mask))) {
 				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
 						     : : "r" (pstate));
 				if (unlikely(need_more)) {
@@ -556,12 +563,12 @@ retry:
 			}
 			if (!--stuck)
 				break;
-		} while (dispatch_stat & 0x5555555555555555UL);
+		} while (dispatch_stat & busy_mask);
 
 		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
 				     : : "r" (pstate));
 
-		if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
+		if (dispatch_stat & busy_mask) {
 			/* Busy bits will not clear, continue instead
 			 * of freezing up on this cpu.
 			 */
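
For orientation only, and not part of the merge itself, here is a minimal, hypothetical user-space C sketch of the bitmask bookkeeping the patch introduces. It assumes the layout implied by the old 0x5555555555555555UL constant: dispatch slot n owns bit 2n (busy) and bit 2n+1 (nack) in the interrupt dispatch status register, so busy_mask collects the busy bits of the slots actually used and nack_mask is simply busy_mask shifted left by one.

/* Hypothetical standalone sketch (not kernel code).  Mirrors the mask
 * construction added to cheetah_xcall_deliver() above.
 */
#include <stdio.h>

int main(void)
{
	unsigned long busy_mask = 0;
	unsigned long nack_mask, dispatch_stat;
	int nack_busy_id;

	/* Suppose three dispatch slots were used (the non-JBus path):
	 * the busy bits land at positions 0, 2 and 4.
	 */
	for (nack_busy_id = 0; nack_busy_id < 3; nack_busy_id++)
		busy_mask |= (0x1UL << (nack_busy_id * 2));

	/* Each nack bit sits one position above its busy bit. */
	nack_mask = busy_mask << 1;

	printf("busy_mask = 0x%lx\n", busy_mask);	/* 0x15 */
	printf("nack_mask = 0x%lx\n", nack_mask);	/* 0x2a */

	/* With the new completion test, a status word whose only set bit
	 * belongs to an unused slot counts as complete; the old
	 * dispatch_stat == 0UL test would have kept looping on it.
	 */
	dispatch_stat = 0x1UL << 40;
	printf("complete = %d\n", !(dispatch_stat & (busy_mask | nack_mask)));

	return 0;
}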