Diffstat (limited to 'arch/sparc64/kernel/smp.c')
-rw-r--r--  arch/sparc64/kernel/smp.c | 55
1 file changed, 10 insertions(+), 45 deletions(-)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index d2d3369e7b5d..8c245859d212 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -38,6 +38,7 @@
 #include <asm/timer.h>
 #include <asm/starfire.h>
 #include <asm/tlb.h>
+#include <asm/sections.h>
 
 extern void calibrate_delay(void);
 
@@ -87,10 +88,6 @@ void __init smp_store_cpu_info(int id)
 	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
 						     "clock-frequency", 0);
 
-	cpu_data(id).pgcache_size = 0;
-	cpu_data(id).pte_cache[0] = NULL;
-	cpu_data(id).pte_cache[1] = NULL;
-	cpu_data(id).pgd_cache = NULL;
 	cpu_data(id).idle_volume = 1;
 
 	cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size",
@@ -121,26 +118,15 @@ static volatile unsigned long callin_flag = 0;
 
 extern void inherit_locked_prom_mappings(int save_p);
 
-static inline void cpu_setup_percpu_base(unsigned long cpu_id)
-{
-#error IMMU TSB usage must be fixed
-	__asm__ __volatile__("mov	%0, %%g5\n\t"
-			     "stxa	%0, [%1] %2\n\t"
-			     "membar	#Sync"
-			     : /* no outputs */
-			     : "r" (__per_cpu_offset(cpu_id)),
-			       "r" (TSB_REG), "i" (ASI_IMMU));
-}
-
 void __init smp_callin(void)
 {
 	int cpuid = hard_smp_processor_id();
 
 	inherit_locked_prom_mappings(0);
 
-	__flush_tlb_all();
+	__local_per_cpu_offset = __per_cpu_offset(cpuid);
 
-	cpu_setup_percpu_base(cpuid);
+	__flush_tlb_all();
 
 	smp_setup_percpu_timer();
 
@@ -1107,12 +1093,15 @@ void __init smp_setup_cpu_possible_map(void)
 
 void __devinit smp_prepare_boot_cpu(void)
 {
-	if (hard_smp_processor_id() >= NR_CPUS) {
+	int cpu = hard_smp_processor_id();
+
+	if (cpu >= NR_CPUS) {
 		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
 		prom_halt();
 	}
 
-	current_thread_info()->cpu = hard_smp_processor_id();
+	current_thread_info()->cpu = cpu;
+	__local_per_cpu_offset = __per_cpu_offset(cpu);
 
 	cpu_set(smp_processor_id(), cpu_online_map);
 	cpu_set(smp_processor_id(), phys_cpu_present_map);
@@ -1173,12 +1162,9 @@ void __init setup_per_cpu_areas(void)
 {
 	unsigned long goal, size, i;
 	char *ptr;
-	/* Created by linker magic */
-	extern char __per_cpu_start[], __per_cpu_end[];
 
 	/* Copy section for each CPU (we discard the original) */
-	goal = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE);
-
+	goal = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
 #ifdef CONFIG_MODULES
 	if (goal < PERCPU_ENOUGH_ROOM)
 		goal = PERCPU_ENOUGH_ROOM;
@@ -1187,31 +1173,10 @@ void __init setup_per_cpu_areas(void)
 	for (size = 1UL; size < goal; size <<= 1UL)
 		__per_cpu_shift++;
 
-	/* Make sure the resulting __per_cpu_base value
-	 * will fit in the 43-bit sign extended IMMU
-	 * TSB register.
-	 */
-	ptr = __alloc_bootmem(size * NR_CPUS, PAGE_SIZE,
-			      (unsigned long) __per_cpu_start);
+	ptr = alloc_bootmem(size * NR_CPUS);
 
 	__per_cpu_base = ptr - __per_cpu_start;
 
-	if ((__per_cpu_shift < PAGE_SHIFT) ||
-	    (__per_cpu_base & ~PAGE_MASK) ||
-	    (__per_cpu_base != (((long) __per_cpu_base << 20) >> 20))) {
-		prom_printf("PER_CPU: Invalid layout, "
-			    "ptr[%p] shift[%lx] base[%lx]\n",
-			    ptr, __per_cpu_shift, __per_cpu_base);
-		prom_halt();
-	}
-
 	for (i = 0; i < NR_CPUS; i++, ptr += size)
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-
-	/* Finally, load in the boot cpu's base value.
-	 * We abuse the IMMU TSB register for trap handler
-	 * entry and exit loading of %g5.  That is why it
-	 * has to be page aligned.
-	 */
-	cpu_setup_percpu_base(hard_smp_processor_id());
 }
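
Note: taken together, the hunks above drop the old trick of parking the per-cpu base in the IMMU TSB register (reloaded into %g5 on trap entry and exit) in favor of an ordinary variable, __local_per_cpu_offset, written once per CPU in smp_callin() and smp_prepare_boot_cpu(). A minimal sketch of how such an offset-based scheme is typically consumed follows; the accessor macro at the end is an illustrative assumption, not part of this patch:

/* Sketch only: offset-based per-cpu addressing as set up by this diff. */
extern unsigned long __per_cpu_base;		/* start of the per-CPU copies */
extern unsigned long __per_cpu_shift;		/* log2 of one copy's size */
extern unsigned long __local_per_cpu_offset;	/* this CPU's offset, set at boot */

/* The per-cpu area size is rounded up to a power of two in
 * setup_per_cpu_areas(), so CPU n's copy sits at a simple
 * shift-based offset from the base. */
#define __per_cpu_offset(cpu) \
	(__per_cpu_base + ((unsigned long)(cpu) << __per_cpu_shift))

/* Hypothetical accessor: reach this CPU's instance of a per-cpu
 * variable by adding the saved offset to its link-time address. */
#define my_cpu_ptr(var) \
	((typeof(var) *)((unsigned long)&(var) + __local_per_cpu_offset))

Because the offset now lives in memory rather than in the 43-bit sign-extended IMMU TSB register, the base no longer needs to be page aligned or range checked. That is why the layout sanity check disappears and the allocation relaxes from page-aligned __alloc_bootmem() to plain alloc_bootmem(), with the size goal rounded only to SMP_CACHE_BYTES.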