diff options
Diffstat (limited to 'arch/sparc64/kernel/smp.c')
-rw-r--r-- | arch/sparc64/kernel/smp.c | 49 |
1 file changed, 40 insertions, 9 deletions
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index 59f020d69d4c..3aba47624df4 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c | |||
@@ -20,7 +20,7 @@ | |||
20 | #include <linux/cache.h> | 20 | #include <linux/cache.h> |
21 | #include <linux/jiffies.h> | 21 | #include <linux/jiffies.h> |
22 | #include <linux/profile.h> | 22 | #include <linux/profile.h> |
23 | #include <linux/bootmem.h> | 23 | #include <linux/lmb.h> |
24 | 24 | ||
25 | #include <asm/head.h> | 25 | #include <asm/head.h> |
26 | #include <asm/ptrace.h> | 26 | #include <asm/ptrace.h> |
@@ -38,7 +38,6 @@ | |||
38 | #include <asm/pgtable.h> | 38 | #include <asm/pgtable.h> |
39 | #include <asm/oplib.h> | 39 | #include <asm/oplib.h> |
40 | #include <asm/uaccess.h> | 40 | #include <asm/uaccess.h> |
41 | #include <asm/timer.h> | ||
42 | #include <asm/starfire.h> | 41 | #include <asm/starfire.h> |
43 | #include <asm/tlb.h> | 42 | #include <asm/tlb.h> |
44 | #include <asm/sections.h> | 43 | #include <asm/sections.h> |
@@ -866,14 +865,21 @@ void smp_call_function_client(int irq, struct pt_regs *regs) | |||
866 | void *info = call_data->info; | 865 | void *info = call_data->info; |
867 | 866 | ||
868 | clear_softint(1 << irq); | 867 | clear_softint(1 << irq); |
868 | |||
869 | irq_enter(); | ||
870 | |||
871 | if (!call_data->wait) { | ||
872 | /* let initiator proceed after getting data */ | ||
873 | atomic_inc(&call_data->finished); | ||
874 | } | ||
875 | |||
876 | func(info); | ||
877 | |||
878 | irq_exit(); | ||
879 | |||
869 | if (call_data->wait) { | 880 | if (call_data->wait) { |
870 | /* let initiator proceed only after completion */ | 881 | /* let initiator proceed only after completion */ |
871 | func(info); | ||
872 | atomic_inc(&call_data->finished); | ||
873 | } else { | ||
874 | /* let initiator proceed after getting data */ | ||
875 | atomic_inc(&call_data->finished); | 882 | atomic_inc(&call_data->finished); |
876 | func(info); | ||
877 | } | 883 | } |
878 | } | 884 | } |
879 | 885 | ||
@@ -903,6 +909,9 @@ extern unsigned long xcall_flush_tlb_kernel_range; | |||
903 | extern unsigned long xcall_report_regs; | 909 | extern unsigned long xcall_report_regs; |
904 | extern unsigned long xcall_receive_signal; | 910 | extern unsigned long xcall_receive_signal; |
905 | extern unsigned long xcall_new_mmu_context_version; | 911 | extern unsigned long xcall_new_mmu_context_version; |
912 | #ifdef CONFIG_KGDB | ||
913 | extern unsigned long xcall_kgdb_capture; | ||
914 | #endif | ||
906 | 915 | ||
907 | #ifdef DCACHE_ALIASING_POSSIBLE | 916 | #ifdef DCACHE_ALIASING_POSSIBLE |
908 | extern unsigned long xcall_flush_dcache_page_cheetah; | 917 | extern unsigned long xcall_flush_dcache_page_cheetah; |
@@ -1032,7 +1041,9 @@ void smp_receive_signal(int cpu) | |||
1032 | 1041 | ||
1033 | void smp_receive_signal_client(int irq, struct pt_regs *regs) | 1042 | void smp_receive_signal_client(int irq, struct pt_regs *regs) |
1034 | { | 1043 | { |
1044 | irq_enter(); | ||
1035 | clear_softint(1 << irq); | 1045 | clear_softint(1 << irq); |
1046 | irq_exit(); | ||
1036 | } | 1047 | } |
1037 | 1048 | ||
1038 | void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) | 1049 | void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) |
@@ -1040,6 +1051,8 @@ void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) | |||
1040 | struct mm_struct *mm; | 1051 | struct mm_struct *mm; |
1041 | unsigned long flags; | 1052 | unsigned long flags; |
1042 | 1053 | ||
1054 | irq_enter(); | ||
1055 | |||
1043 | clear_softint(1 << irq); | 1056 | clear_softint(1 << irq); |
1044 | 1057 | ||
1045 | /* See if we need to allocate a new TLB context because | 1058 | /* See if we need to allocate a new TLB context because |
@@ -1059,6 +1072,8 @@ void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) | |||
1059 | load_secondary_context(mm); | 1072 | load_secondary_context(mm); |
1060 | __flush_tlb_mm(CTX_HWBITS(mm->context), | 1073 | __flush_tlb_mm(CTX_HWBITS(mm->context), |
1061 | SECONDARY_CONTEXT); | 1074 | SECONDARY_CONTEXT); |
1075 | |||
1076 | irq_exit(); | ||
1062 | } | 1077 | } |
1063 | 1078 | ||
1064 | void smp_new_mmu_context_version(void) | 1079 | void smp_new_mmu_context_version(void) |
@@ -1066,6 +1081,13 @@ void smp_new_mmu_context_version(void) | |||
1066 | smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0); | 1081 | smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0); |
1067 | } | 1082 | } |
1068 | 1083 | ||
1084 | #ifdef CONFIG_KGDB | ||
1085 | void kgdb_roundup_cpus(unsigned long flags) | ||
1086 | { | ||
1087 | smp_cross_call(&xcall_kgdb_capture, 0, 0, 0); | ||
1088 | } | ||
1089 | #endif | ||
1090 | |||
1069 | void smp_report_regs(void) | 1091 | void smp_report_regs(void) |
1070 | { | 1092 | { |
1071 | smp_cross_call(&xcall_report_regs, 0, 0, 0); | 1093 | smp_cross_call(&xcall_report_regs, 0, 0, 0); |
@@ -1217,6 +1239,8 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs) | |||
1217 | { | 1239 | { |
1218 | clear_softint(1 << irq); | 1240 | clear_softint(1 << irq); |
1219 | 1241 | ||
1242 | irq_enter(); | ||
1243 | |||
1220 | preempt_disable(); | 1244 | preempt_disable(); |
1221 | 1245 | ||
1222 | __asm__ __volatile__("flushw"); | 1246 | __asm__ __volatile__("flushw"); |
@@ -1229,6 +1253,8 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs) | |||
1229 | prom_world(0); | 1253 | prom_world(0); |
1230 | 1254 | ||
1231 | preempt_enable(); | 1255 | preempt_enable(); |
1256 | |||
1257 | irq_exit(); | ||
1232 | } | 1258 | } |
1233 | 1259 | ||
1234 | /* /proc/profile writes can call this, don't __init it please. */ | 1260 | /* /proc/profile writes can call this, don't __init it please. */ |
@@ -1431,7 +1457,7 @@ EXPORT_SYMBOL(__per_cpu_shift); | |||
1431 | 1457 | ||
1432 | void __init real_setup_per_cpu_areas(void) | 1458 | void __init real_setup_per_cpu_areas(void) |
1433 | { | 1459 | { |
1434 | unsigned long goal, size, i; | 1460 | unsigned long paddr, goal, size, i; |
1435 | char *ptr; | 1461 | char *ptr; |
1436 | 1462 | ||
1437 | /* Copy section for each CPU (we discard the original) */ | 1463 | /* Copy section for each CPU (we discard the original) */ |
@@ -1441,8 +1467,13 @@ void __init real_setup_per_cpu_areas(void) | |||
1441 | for (size = PAGE_SIZE; size < goal; size <<= 1UL) | 1467 | for (size = PAGE_SIZE; size < goal; size <<= 1UL) |
1442 | __per_cpu_shift++; | 1468 | __per_cpu_shift++; |
1443 | 1469 | ||
1444 | ptr = alloc_bootmem_pages(size * NR_CPUS); | 1470 | paddr = lmb_alloc(size * NR_CPUS, PAGE_SIZE); |
1471 | if (!paddr) { | ||
1472 | prom_printf("Cannot allocate per-cpu memory.\n"); | ||
1473 | prom_halt(); | ||
1474 | } | ||
1445 | 1475 | ||
1476 | ptr = __va(paddr); | ||
1446 | __per_cpu_base = ptr - __per_cpu_start; | 1477 | __per_cpu_base = ptr - __per_cpu_start; |
1447 | 1478 | ||
1448 | for (i = 0; i < NR_CPUS; i++, ptr += size) | 1479 | for (i = 0; i < NR_CPUS; i++, ptr += size) |