Diffstat (limited to 'arch/sparc64')
-rw-r--r--   arch/sparc64/kernel/irq.c   |  22
-rw-r--r--   arch/sparc64/kernel/smp.c   | 125
-rw-r--r--   arch/sparc64/kernel/traps.c |   6
3 files changed, 151 insertions, 2 deletions
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index ff201c007e0c..c80d2531ec46 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -900,6 +900,24 @@ static void __cpuinit init_one_kbuf(unsigned long *pa_ptr)
         *pa_ptr = __pa(page);
 }
 
+static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb)
+{
+#ifdef CONFIG_SMP
+        unsigned long page;
+
+        BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
+
+        page = get_zeroed_page(GFP_ATOMIC);
+        if (!page) {
+                prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
+                prom_halt();
+        }
+
+        tb->cpu_mondo_block_pa = __pa(page);
+        tb->cpu_list_pa = __pa(page + 64);
+#endif
+}
+
 /* Allocate and init the mondo and error queues for this cpu. */
 void __cpuinit sun4v_init_mondo_queues(void)
 {
@@ -908,10 +926,14 @@ void __cpuinit sun4v_init_mondo_queues(void)
 
         init_one_mondo(&tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO);
         init_one_mondo(&tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO);
+
         init_one_mondo(&tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR);
         init_one_kbuf(&tb->resum_kernel_buf_pa);
+
         init_one_mondo(&tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR);
         init_one_kbuf(&tb->nonresum_kernel_buf_pa);
+
+        init_cpu_send_mondo_info(tb);
 }
 
 /* Only invoked on boot processor. */
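The new init_cpu_send_mondo_info() carves one zeroed page into a 64-byte mondo data block followed by a u16 CPU list, and the BUILD_BUG_ON() guarantees that NR_CPUS list entries fit in the remainder of the page. A minimal sketch of that layout, assuming the same kernel headers as irq.c (the macro and helper names below are hypothetical; only the offset 64 comes from the hunk):

        /* Page carved out by init_cpu_send_mondo_info():
         *
         *   page + 0  : 64-byte mondo data block   -> tb->cpu_mondo_block_pa
         *   page + 64 : u16 cpu list, NR_CPUS max  -> tb->cpu_list_pa
         */
        #define CPU_MONDO_BLOCK_SZ      64      /* hypothetical name, value from the hunk */

        static inline u16 *cpu_list_va(unsigned long page)     /* hypothetical helper */
        {
                /* The BUILD_BUG_ON() above ensures NR_CPUS u16 entries fit here. */
                return (u16 *)(page + CPU_MONDO_BLOCK_SZ);
        }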
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 223cc6bd369a..c10a3a8639e8 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -531,10 +531,133 @@ retry:
         }
 }
 
+#if 0
+/* Multi-cpu list version. */
+static int init_cpu_list(u16 *list, cpumask_t mask)
+{
+        int i, cnt;
+
+        cnt = 0;
+        for_each_cpu_mask(i, mask)
+                list[cnt++] = i;
+
+        return cnt;
+}
+
+static int update_cpu_list(u16 *list, int orig_cnt, cpumask_t mask)
+{
+        int i;
+
+        for (i = 0; i < orig_cnt; i++) {
+                if (list[i] == 0xffff)
+                        cpu_clear(i, mask);
+        }
+
+        return init_cpu_list(list, mask);
+}
+
+static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
+{
+        int this_cpu = get_cpu();
+        struct trap_per_cpu *tb = &trap_block[this_cpu];
+        u64 *mondo = __va(tb->cpu_mondo_block_pa);
+        u16 *cpu_list = __va(tb->cpu_list_pa);
+        int cnt, retries;
+
+        mondo[0] = data0;
+        mondo[1] = data1;
+        mondo[2] = data2;
+        wmb();
+
+        retries = 0;
+        cnt = init_cpu_list(cpu_list, mask);
+        do {
+                register unsigned long func __asm__("%o0");
+                register unsigned long arg0 __asm__("%o1");
+                register unsigned long arg1 __asm__("%o2");
+                register unsigned long arg2 __asm__("%o3");
+
+                func = HV_FAST_CPU_MONDO_SEND;
+                arg0 = cnt;
+                arg1 = tb->cpu_list_pa;
+                arg2 = tb->cpu_mondo_block_pa;
+
+                __asm__ __volatile__("ta %8"
+                                     : "=&r" (func), "=&r" (arg0),
+                                       "=&r" (arg1), "=&r" (arg2)
+                                     : "0" (func), "1" (arg0),
+                                       "2" (arg1), "3" (arg2),
+                                       "i" (HV_FAST_TRAP)
+                                     : "memory");
+                if (likely(func == HV_EOK))
+                        break;
+
+                if (unlikely(++retries > 100)) {
+                        printk("CPU[%d]: sun4v mondo error %lu\n",
+                               this_cpu, func);
+                        break;
+                }
+
+                cnt = update_cpu_list(cpu_list, cnt, mask);
+
+                udelay(2 * cnt);
+        } while (1);
+
+        put_cpu();
+}
+#else
+/* Single-cpu list version. */
 static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
 {
-        /* XXX implement me */
+        int this_cpu = get_cpu();
+        struct trap_per_cpu *tb = &trap_block[this_cpu];
+        u64 *mondo = __va(tb->cpu_mondo_block_pa);
+        u16 *cpu_list = __va(tb->cpu_list_pa);
+        int i;
+
+        mondo[0] = data0;
+        mondo[1] = data1;
+        mondo[2] = data2;
+        wmb();
+
+        for_each_cpu_mask(i, mask) {
+                int retries = 0;
+
+                do {
+                        register unsigned long func __asm__("%o0");
+                        register unsigned long arg0 __asm__("%o1");
+                        register unsigned long arg1 __asm__("%o2");
+                        register unsigned long arg2 __asm__("%o3");
+
+                        cpu_list[0] = i;
+                        func = HV_FAST_CPU_MONDO_SEND;
+                        arg0 = 1;
+                        arg1 = tb->cpu_list_pa;
+                        arg2 = tb->cpu_mondo_block_pa;
+
+                        __asm__ __volatile__("ta %8"
+                                             : "=&r" (func), "=&r" (arg0),
+                                               "=&r" (arg1), "=&r" (arg2)
+                                             : "0" (func), "1" (arg0),
+                                               "2" (arg1), "3" (arg2),
+                                               "i" (HV_FAST_TRAP)
+                                             : "memory");
+                        if (likely(func == HV_EOK))
+                                break;
+
+                        if (unlikely(++retries > 100)) {
+                                printk("CPU[%d]: sun4v mondo error %lu\n",
+                                       this_cpu, func);
+                                break;
+                        }
+
+                        udelay(2 * i);
+                } while (1);
+        }
+
+        put_cpu();
 }
+#endif
 
 /* Send cross call to all processors mentioned in MASK
  * except self.
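Both copies of hypervisor_xcall_deliver() above use the same sun4v calling convention: %o0 = HV_FAST_CPU_MONDO_SEND, %o1 = number of CPU list entries, %o2 = physical address of the u16 CPU list, %o3 = physical address of the mondo data block, then trap via "ta HV_FAST_TRAP"; the status comes back in %o0 and HV_EOK means the mondo was accepted. A hedged sketch that factors just that hypervisor call out of the retry loops (the wrapper name is hypothetical, assumes the same headers as smp.c, and is not part of the patch):

        /* Issue one cpu-mondo-send hypercall for the list currently stored at
         * tb->cpu_list_pa; returns HV_EOK on success, a sun4v error code otherwise. */
        static unsigned long cpu_mondo_send(struct trap_per_cpu *tb, unsigned long cnt)
        {
                register unsigned long func __asm__("%o0") = HV_FAST_CPU_MONDO_SEND;
                register unsigned long arg0 __asm__("%o1") = cnt;
                register unsigned long arg1 __asm__("%o2") = tb->cpu_list_pa;
                register unsigned long arg2 __asm__("%o3") = tb->cpu_mondo_block_pa;

                __asm__ __volatile__("ta %8"
                                     : "=&r" (func), "=&r" (arg0),
                                       "=&r" (arg1), "=&r" (arg2)
                                     : "0" (func), "1" (arg0),
                                       "2" (arg1), "3" (arg2),
                                       "i" (HV_FAST_TRAP)
                                     : "memory");

                return func;
        }

With a wrapper like this, the enabled single-CPU-list loop reduces to writing cpu_list[0] = i and retrying until HV_EOK (or the 100-try limit), while the disabled multi-CPU path would resend a shrinking list, dropping entries that have been overwritten with 0xffff before each retry.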
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 5417ff1b9345..ac171161e794 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -2377,7 +2377,11 @@ void __init trap_init(void)
             (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
              offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
             (TRAP_PER_CPU_FAULT_INFO !=
-             offsetof(struct trap_per_cpu, fault_info)))
+             offsetof(struct trap_per_cpu, fault_info)) ||
+            (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
+             offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
+            (TRAP_PER_CPU_CPU_LIST_PA !=
+             offsetof(struct trap_per_cpu, cpu_list_pa)))
                 trap_per_cpu_offsets_are_bolixed_dave();
 
         /* Attach to the address space of init_task.  On SMP we
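The traps.c hunk extends the boot-time consistency check in trap_init(): the assembler-visible constants TRAP_PER_CPU_CPU_MONDO_BLOCK_PA and TRAP_PER_CPU_CPU_LIST_PA must now match the offsetof() of the corresponding struct trap_per_cpu members, otherwise trap_per_cpu_offsets_are_bolixed_dave() is called. The same guard can also be expressed at compile time; a standalone sketch of the pattern with made-up names (not the kernel's constants or struct):

        #include <stddef.h>                     /* offsetof */

        struct trap_per_cpu_example {           /* stand-in, not the real struct */
                unsigned long cpu_mondo_block_pa;
                unsigned long cpu_list_pa;
        };

        /* Assumed asm-side offsets for an LP64 target, for this sketch only. */
        #define EX_CPU_MONDO_BLOCK_PA   0x00
        #define EX_CPU_LIST_PA          0x08

        /* Fails the build, rather than the boot, if the offsets drift apart. */
        _Static_assert(EX_CPU_MONDO_BLOCK_PA ==
                       offsetof(struct trap_per_cpu_example, cpu_mondo_block_pa),
                       "cpu_mondo_block_pa offset mismatch");
        _Static_assert(EX_CPU_LIST_PA ==
                       offsetof(struct trap_per_cpu_example, cpu_list_pa),
                       "cpu_list_pa offset mismatch");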