author     David S. Miller <davem@sunset.davemloft.net>   2006-02-12 02:07:13 -0500
committer  David S. Miller <davem@sunset.davemloft.net>   2006-03-20 04:12:26 -0500
commit     b5a37e96b8dc067b979e44c4e109c9bc49c2f4d8
tree       2dcb1bf2927598c2fe9064f82174d7ecd445341a /arch/sparc64
parent     c4bce90ea2069e5a87beac806de3090ab32128d5
[SPARC64]: Fix mondo queue allocations.
We have to use bootmem during init_IRQ and the page allocator
when bringing up sibling cpus.
Also, fix the incorrect hypervisor call return value checks in
the hypervisor SMP cpu mondo send code.
Signed-off-by: David S. Miller <davem@davemloft.net>
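
The allocation half of the fix comes down to one decision: init_IRQ runs on the boot
processor before the page allocator is up, so its queue pages must come from bootmem,
while sibling cpus brought up later through the trampoline can use the normal page
allocator. A condensed sketch of the allocation helper the patch introduces (the kbuf
and send-mondo variants in the diff follow the same pattern):

	/* Sketch condensed from the arch/sparc64/kernel/irq.c hunks below. */
	static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, int use_bootmem)
	{
		void *page;

		if (use_bootmem)	/* init_IRQ: page allocator not ready yet */
			page = alloc_bootmem_low_pages(PAGE_SIZE);
		else			/* sibling cpu bring-up: normal allocator */
			page = (void *) get_zeroed_page(GFP_ATOMIC);

		if (!page) {
			prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
			prom_halt();
		}

		*pa_ptr = __pa(page);	/* the hypervisor is given the physical address */
	}

init_IRQ calls sun4v_init_mondo_queues(1) to take the bootmem path, and the SMP
trampoline passes 0 in %o0 so secondary cpus use the page allocator.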
Diffstat (limited to 'arch/sparc64')
-rw-r--r--  arch/sparc64/kernel/irq.c         | 83
-rw-r--r--  arch/sparc64/kernel/smp.c         |  4
-rw-r--r--  arch/sparc64/kernel/trampoline.S  |  2
3 files changed, 59 insertions(+), 30 deletions(-)
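
The return-value half of the fix hinges on the sun4v hypervisor calling convention:
for a fast trap the function number goes in %o5 and the arguments in %o0-%o2, but the
status code comes back in %o0. Checking the %o5 variable (func) after the trap therefore
tests the wrong register, and the patch switches the checks in irq.c and smp.c to arg0.
A minimal sketch of the corrected check, reconstructed around the cpu_qconf context
shown in the hunks below (treat the exact constraint list as illustrative):

	register unsigned long func __asm__("%o5");
	register unsigned long arg0 __asm__("%o0");
	register unsigned long arg1 __asm__("%o1");
	register unsigned long arg2 __asm__("%o2");

	func = HV_FAST_CPU_QCONF;
	arg0 = type;
	arg1 = paddr;
	arg2 = 128;
	__asm__ __volatile__("ta	%8"
			     : "=&r" (func), "=&r" (arg0),
			       "=&r" (arg1), "=&r" (arg2)
			     : "0" (func), "1" (arg0),
			       "2" (arg1), "3" (arg2),
			       "i" (HV_FAST_TRAP));

	if (arg0 != HV_EOK)	/* status is in %o0 (arg0), not %o5 (func) */
		prom_halt();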
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index c5dd6daf127f..51f65054bf18 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -21,6 +21,7 @@
 #include <linux/delay.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/bootmem.h>
 
 #include <asm/ptrace.h>
 #include <asm/processor.h>
@@ -861,24 +862,16 @@ void init_irqwork_curcpu(void)
 	memset(__irq_work + cpu, 0, sizeof(struct irq_work_struct));
 }
 
-static void __cpuinit init_one_mondo(unsigned long *pa_ptr, unsigned long type)
+static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type)
 {
 	register unsigned long func __asm__("%o5");
 	register unsigned long arg0 __asm__("%o0");
 	register unsigned long arg1 __asm__("%o1");
 	register unsigned long arg2 __asm__("%o2");
-	unsigned long page = get_zeroed_page(GFP_ATOMIC);
-
-	if (!page) {
-		prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
-		prom_halt();
-	}
-
-	*pa_ptr = __pa(page);
 
 	func = HV_FAST_CPU_QCONF;
 	arg0 = type;
-	arg1 = *pa_ptr;
+	arg1 = paddr;
 	arg2 = 128;	/* XXX Implied by Niagara queue offsets. XXX */
 	__asm__ __volatile__("ta	%8"
 			     : "=&r" (func), "=&r" (arg0),
@@ -887,16 +880,48 @@ static void __cpuinit init_one_mondo(unsigned long *pa_ptr, unsigned long type)
 			       "2" (arg1), "3" (arg2),
 			       "i" (HV_FAST_TRAP));
 
-	if (func != HV_EOK) {
+	if (arg0 != HV_EOK) {
 		prom_printf("SUN4V: cpu_qconf(%lu) failed with error %lu\n",
 			    type, func);
 		prom_halt();
 	}
 }
 
-static void __cpuinit init_one_kbuf(unsigned long *pa_ptr)
+static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
 {
-	unsigned long page = get_zeroed_page(GFP_ATOMIC);
+	struct trap_per_cpu *tb = &trap_block[this_cpu];
+
+	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO);
+	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO);
+	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR);
+	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR);
+}
+
+static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, int use_bootmem)
+{
+	void *page;
+
+	if (use_bootmem)
+		page = alloc_bootmem_low_pages(PAGE_SIZE);
+	else
+		page = (void *) get_zeroed_page(GFP_ATOMIC);
+
+	if (!page) {
+		prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
+		prom_halt();
+	}
+
+	*pa_ptr = __pa(page);
+}
+
+static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, int use_bootmem)
+{
+	void *page;
+
+	if (use_bootmem)
+		page = alloc_bootmem_low_pages(PAGE_SIZE);
+	else
+		page = (void *) get_zeroed_page(GFP_ATOMIC);
 
 	if (!page) {
 		prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
@@ -906,14 +931,18 @@ static void __cpuinit init_one_kbuf(unsigned long *pa_ptr)
 	*pa_ptr = __pa(page);
 }
 
-static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb)
+static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem)
 {
 #ifdef CONFIG_SMP
-	unsigned long page;
+	void *page;
 
 	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
 
-	page = get_zeroed_page(GFP_ATOMIC);
+	if (use_bootmem)
+		page = alloc_bootmem_low_pages(PAGE_SIZE);
+	else
+		page = (void *) get_zeroed_page(GFP_ATOMIC);
+
 	if (!page) {
 		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
 		prom_halt();
@@ -924,22 +953,22 @@ static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb)
 #endif
 }
 
-/* Allocate and init the mondo and error queues for this cpu.  */
-void __cpuinit sun4v_init_mondo_queues(void)
+/* Allocate and register the mondo and error queues for this cpu.  */
+void __cpuinit sun4v_init_mondo_queues(int use_bootmem)
 {
 	int cpu = hard_smp_processor_id();
 	struct trap_per_cpu *tb = &trap_block[cpu];
 
-	init_one_mondo(&tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO);
-	init_one_mondo(&tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO);
-
-	init_one_mondo(&tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR);
-	init_one_kbuf(&tb->resum_kernel_buf_pa);
+	alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem);
+	alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem);
+	alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem);
+	alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem);
+	alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem);
+	alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem);
 
-	init_one_mondo(&tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR);
-	init_one_kbuf(&tb->nonresum_kernel_buf_pa);
+	init_cpu_send_mondo_info(tb, use_bootmem);
 
-	init_cpu_send_mondo_info(tb);
+	sun4v_register_mondo_queues(cpu);
 }
 
 /* Only invoked on boot processor. */
@@ -950,7 +979,7 @@ void __init init_IRQ(void)
 	memset(&ivector_table[0], 0, sizeof(ivector_table));
 
 	if (tlb_type == hypervisor)
-		sun4v_init_mondo_queues();
+		sun4v_init_mondo_queues(1);
 
 	/* We need to clear any IRQ's pending in the soft interrupt
 	 * registers, a spurious one could be left around from the
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 7d7e02ba297e..d637168ce37d 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -591,7 +591,7 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t
 				     "2" (arg1), "3" (arg2),
 				     "i" (HV_FAST_TRAP)
 				     : "memory");
-		if (likely(func == HV_EOK))
+		if (likely(arg0 == HV_EOK))
 			break;
 
 		if (unlikely(++retries > 100)) {
@@ -644,7 +644,7 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t
 				     "2" (arg1), "3" (arg2),
 				     "i" (HV_FAST_TRAP)
 				     : "memory");
-		if (likely(func == HV_EOK))
+		if (likely(arg0 == HV_EOK))
 			break;
 
 		if (unlikely(++retries > 100)) {
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
index 88382200c7b8..22fb24eac997 100644
--- a/arch/sparc64/kernel/trampoline.S
+++ b/arch/sparc64/kernel/trampoline.S
@@ -367,7 +367,7 @@ after_lock_tlb:
 	 nop
 
 	call	sun4v_init_mondo_queues
-	 nop
+	 mov	0, %o0
 
 1:	call	init_cur_cpu_trap
 	 nop