Diffstat (limited to 'arch/sparc/kernel/sun4m_smp.c')
-rw-r--r--  arch/sparc/kernel/sun4m_smp.c | 181 +++++++++++++-----------------
 1 file changed, 76 insertions(+), 105 deletions(-)
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 1dde312eebda..70b375a4c2c2 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -40,15 +40,11 @@ extern ctxd_t *srmmu_ctx_table_phys;
 extern void calibrate_delay(void);
 
 extern volatile int smp_processors_ready;
-extern int smp_num_cpus;
 extern volatile unsigned long cpu_callin_map[NR_CPUS];
 extern unsigned char boot_cpu_id;
-extern int smp_activated;
-extern volatile int __cpu_number_map[NR_CPUS];
-extern volatile int __cpu_logical_map[NR_CPUS];
-extern volatile unsigned long ipi_count;
-extern volatile int smp_process_available;
-extern volatile int smp_commenced;
+
+extern cpumask_t smp_commenced_mask;
+
 extern int __smp4m_processor_id(void);
 
 /*#define SMP_DEBUG*/
@@ -77,8 +73,6 @@ void __init smp4m_callin(void)
 	local_flush_cache_all();
 	local_flush_tlb_all();
 
-	set_irq_udt(boot_cpu_id);
-
 	/* Get our local ticker going. */
 	smp_setup_percpu_timer();
 
@@ -95,8 +89,9 @@ void __init smp4m_callin(void)
 	 * to call the scheduler code.
 	 */
 	/* Allow master to continue. */
-	swap((unsigned long *)&cpu_callin_map[cpuid], 1);
+	swap(&cpu_callin_map[cpuid], 1);
 
+	/* XXX: What's up with all the flushes? */
 	local_flush_cache_all();
 	local_flush_tlb_all();
 
@@ -111,13 +106,14 @@ void __init smp4m_callin(void)
 	atomic_inc(&init_mm.mm_count);
 	current->active_mm = &init_mm;
 
-	while(!smp_commenced)
-		barrier();
-
-	local_flush_cache_all();
-	local_flush_tlb_all();
+	while (!cpu_isset(cpuid, smp_commenced_mask))
+		mb();
 
 	local_irq_enable();
+
+	cpu_set(cpuid, cpu_online_map);
+	/* last one in gets all the interrupts (for testing) */
+	set_irq_udt(boot_cpu_id);
 }
 
 extern void init_IRQ(void);
@@ -134,102 +130,76 @@ extern unsigned long trapbase_cpu3[];
 
 void __init smp4m_boot_cpus(void)
 {
-	int cpucount = 0;
-	int i, mid;
+	smp_setup_percpu_timer();
+	local_flush_cache_all();
+}
 
-	printk("Entering SMP Mode...\n");
+int smp4m_boot_one_cpu(int i)
+{
+	extern unsigned long sun4m_cpu_startup;
+	unsigned long *entry = &sun4m_cpu_startup;
+	struct task_struct *p;
+	int timeout;
+	int cpu_node;
 
-	local_irq_enable();
-	cpus_clear(cpu_present_map);
+	cpu_find_by_mid(i, &cpu_node);
+
+	/* Cook up an idler for this guy. */
+	p = fork_idle(i);
+	current_set[i] = task_thread_info(p);
+	/* See trampoline.S for details... */
+	entry += ((i-1) * 3);
 
-	for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
-		cpu_set(mid, cpu_present_map);
+	/*
+	 * Initialize the contexts table
+	 * Since the call to prom_startcpu() trashes the structure,
+	 * we need to re-initialize it for each cpu
+	 */
+	smp_penguin_ctable.which_io = 0;
+	smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
+	smp_penguin_ctable.reg_size = 0;
 
-	for(i=0; i < NR_CPUS; i++) {
-		__cpu_number_map[i] = -1;
-		__cpu_logical_map[i] = -1;
+	/* whirrr, whirrr, whirrrrrrrrr... */
+	printk("Starting CPU %d at %p\n", i, entry);
+	local_flush_cache_all();
+	prom_startcpu(cpu_node,
+		      &smp_penguin_ctable, 0, (char *)entry);
+
+	/* wheee... it's going... */
+	for(timeout = 0; timeout < 10000; timeout++) {
+		if(cpu_callin_map[i])
+			break;
+		udelay(200);
 	}
 
-	__cpu_number_map[boot_cpu_id] = 0;
-	__cpu_logical_map[0] = boot_cpu_id;
-	current_thread_info()->cpu = boot_cpu_id;
+	if (!(cpu_callin_map[i])) {
+		printk("Processor %d is stuck.\n", i);
+		return -ENODEV;
+	}
 
-	smp_store_cpu_info(boot_cpu_id);
-	set_irq_udt(boot_cpu_id);
-	smp_setup_percpu_timer();
 	local_flush_cache_all();
-	if(cpu_find_by_instance(1, NULL, NULL))
-		return; /* Not an MP box. */
-	for(i = 0; i < NR_CPUS; i++) {
-		if(i == boot_cpu_id)
-			continue;
-
-		if (cpu_isset(i, cpu_present_map)) {
-			extern unsigned long sun4m_cpu_startup;
-			unsigned long *entry = &sun4m_cpu_startup;
-			struct task_struct *p;
-			int timeout;
-
-			/* Cook up an idler for this guy. */
-			p = fork_idle(i);
-			cpucount++;
-			current_set[i] = task_thread_info(p);
-			/* See trampoline.S for details... */
-			entry += ((i-1) * 3);
-
-			/*
-			 * Initialize the contexts table
-			 * Since the call to prom_startcpu() trashes the structure,
-			 * we need to re-initialize it for each cpu
-			 */
-			smp_penguin_ctable.which_io = 0;
-			smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
-			smp_penguin_ctable.reg_size = 0;
-
-			/* whirrr, whirrr, whirrrrrrrrr... */
-			printk("Starting CPU %d at %p\n", i, entry);
-			local_flush_cache_all();
-			prom_startcpu(cpu_data(i).prom_node,
-				      &smp_penguin_ctable, 0, (char *)entry);
-
-			/* wheee... it's going... */
-			for(timeout = 0; timeout < 10000; timeout++) {
-				if(cpu_callin_map[i])
-					break;
-				udelay(200);
-			}
-			if(cpu_callin_map[i]) {
-				/* Another "Red Snapper". */
-				__cpu_number_map[i] = i;
-				__cpu_logical_map[i] = i;
-			} else {
-				cpucount--;
-				printk("Processor %d is stuck.\n", i);
-			}
-		}
-		if(!(cpu_callin_map[i])) {
-			cpu_clear(i, cpu_present_map);
-			__cpu_number_map[i] = -1;
+	return 0;
+}
+
+void __init smp4m_smp_done(void)
+{
+	int i, first;
+	int *prev;
+
+	/* setup cpu list for irq rotation */
+	first = 0;
+	prev = &first;
+	for (i = 0; i < NR_CPUS; i++) {
+		if (cpu_online(i)) {
+			*prev = i;
+			prev = &cpu_data(i).next;
 		}
 	}
+	*prev = first;
 	local_flush_cache_all();
-	if(cpucount == 0) {
-		printk("Error: only one Processor found.\n");
-		cpu_present_map = cpumask_of_cpu(smp_processor_id());
-	} else {
-		unsigned long bogosum = 0;
-		for_each_present_cpu(i)
-			bogosum += cpu_data(i).udelay_val;
-		printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
-			cpucount + 1,
-			bogosum/(500000/HZ),
-			(bogosum/(5000/HZ))%100);
-		smp_activated = 1;
-		smp_num_cpus = cpucount + 1;
-	}
 
 	/* Free unneeded trap tables */
-	if (!cpu_isset(i, cpu_present_map)) {
+	if (!cpu_isset(1, cpu_present_map)) {
 		ClearPageReserved(virt_to_page(trapbase_cpu1));
 		init_page_count(virt_to_page(trapbase_cpu1));
 		free_page((unsigned long)trapbase_cpu1);
@@ -263,6 +233,9 @@ void __init smp4m_boot_cpus(void)
  */
 void smp4m_irq_rotate(int cpu)
 {
+	int next = cpu_data(cpu).next;
+	if (next != cpu)
+		set_irq_udt(next);
 }
 
 /* Cross calls, in order to work efficiently and atomically do all
@@ -289,7 +262,7 @@ void smp4m_message_pass(int target, int msg, unsigned long data, int wait)
 
 	smp_cpu_in_msg[me]++;
 	if(target == MSG_ALL_BUT_SELF || target == MSG_ALL) {
-		mask = cpu_present_map;
+		mask = cpu_online_map;
 		if(target == MSG_ALL_BUT_SELF)
 			cpu_clear(me, mask);
 		for(i = 0; i < 4; i++) {
@@ -314,8 +287,8 @@ static struct smp_funcall {
 	unsigned long arg3;
 	unsigned long arg4;
 	unsigned long arg5;
-	unsigned long processors_in[NR_CPUS]; /* Set when ipi entered. */
-	unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */
+	unsigned long processors_in[SUN4M_NCPUS]; /* Set when ipi entered. */
+	unsigned long processors_out[SUN4M_NCPUS]; /* Set when ipi exited. */
 } ccall_info;
 
 static DEFINE_SPINLOCK(cross_call_lock);
@@ -324,8 +297,7 @@ static DEFINE_SPINLOCK(cross_call_lock);
 void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
 		    unsigned long arg3, unsigned long arg4, unsigned long arg5)
 {
-	if(smp_processors_ready) {
-		register int ncpus = smp_num_cpus;
+		register int ncpus = SUN4M_NCPUS;
 		unsigned long flags;
 
 		spin_lock_irqsave(&cross_call_lock, flags);
@@ -340,7 +312,7 @@ void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
 
 		/* Init receive/complete mapping, plus fire the IPI's off. */
 		{
-			cpumask_t mask = cpu_present_map;
+			cpumask_t mask = cpu_online_map;
 			register int i;
 
 			cpu_clear(smp_processor_id(), mask);
@@ -373,7 +345,6 @@ void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
 		}
 
 		spin_unlock_irqrestore(&cross_call_lock, flags);
-	}
 }
 
 /* Running cross calls. */