author    Graf Yang <graf.yang@analog.com>    2009-01-07 10:14:39 -0500
committer Bryan Wu <cooloney@kernel.org>      2009-01-07 10:14:39 -0500
commit    6b3087c64a92a36ae20d33479b4df6d7afc910d4 (patch)
tree      95984fc623658ebf150d0d912a7f6c5a0301a5a9 /arch/blackfin/mach-common/smp.c
parent    c51b4488cd5bff08ed5690a8f303ff7f0894da2a (diff)
Blackfin arch: SMP supporting patchset: Blackfin header files and machine common code
The Blackfin dual-core BF561 processor can support SMP-like features.
https://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:smp-like

This patch adds the SMP extensions to the Blackfin header files and the
machine common code.
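Illustrative usage (not part of this patch; my_func and my_data are
hypothetical): a caller can use the cross-call interface added here to
run a function on the other core and wait for it to finish:

    static void my_func(void *info)
    {
            /* Runs on each other online CPU, in IPI context. */
            printk(KERN_INFO "CPU%u: info=%p\n", smp_processor_id(), info);
    }

    /* On the calling core: run my_func elsewhere and wait. */
    smp_call_function(my_func, &my_data, 1);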
Signed-off-by: Graf Yang <graf.yang@analog.com>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
Diffstat (limited to 'arch/blackfin/mach-common/smp.c')
-rw-r--r--	arch/blackfin/mach-common/smp.c	476
1 file changed, 476 insertions, 0 deletions
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
new file mode 100644
index 000000000000..7aeecedd3147
--- /dev/null
+++ b/arch/blackfin/mach-common/smp.c
@@ -0,0 +1,476 @@
/*
 * File: arch/blackfin/mach-common/smp.c
 * Author: Philippe Gerum <rpm@xenomai.org>
 * IPI management based on arch/arm/kernel/smp.c.
 *
 * Copyright 2007 Analog Devices Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/cpu.h>

struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));

void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
	*init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
	*init_saved_dcplb_fault_addr_coreb;

cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);

#define BFIN_IPI_RESCHEDULE	0
#define BFIN_IPI_CALL_FUNC	1
#define BFIN_IPI_CPU_STOP	2

struct blackfin_flush_data {
	unsigned long start;
	unsigned long end;
};

void *secondary_stack;

struct smp_call_struct {
	void (*func)(void *info);
	void *info;
	int wait;
	cpumask_t pending;
	cpumask_t waitmask;
};

static struct blackfin_flush_data smp_flush_data;

static DEFINE_SPINLOCK(stop_lock);

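/*
 * Cross-core notifications are delivered as messages: the sender
 * allocates a struct ipi_message, links it onto the target CPU's
 * per-CPU queue and raises a hardware IPI; the target drains the
 * queue in ipi_handler().  On the dual-core BF561 a broadcast has
 * at most one target, so a message is only ever queued once.
 */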
struct ipi_message {
	struct list_head list;
	unsigned long type;
	struct smp_call_struct call_struct;
};

struct ipi_message_queue {
	struct list_head head;
	spinlock_t lock;
	unsigned long count;
};

static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);

static void ipi_cpu_stop(unsigned int cpu)
{
	spin_lock(&stop_lock);
	printk(KERN_CRIT "CPU%u: stopping\n", cpu);
	dump_stack();
	spin_unlock(&stop_lock);

	cpu_clear(cpu, cpu_online_map);

	local_irq_disable();

	while (1)
		SSYNC();
}

static void ipi_flush_icache(void *info)
{
	struct blackfin_flush_data *fdata = info;

	/* Invalidate the memory holding the bounds of the flushed region. */
	blackfin_dcache_invalidate_range((unsigned long)fdata,
					 (unsigned long)fdata + sizeof(*fdata));

	blackfin_icache_flush_range(fdata->start, fdata->end);
}

static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
{
	int wait;
	void (*func)(void *info);
	void *info;

	func = msg->call_struct.func;
	info = msg->call_struct.info;
	wait = msg->call_struct.wait;
	cpu_clear(cpu, msg->call_struct.pending);
	func(info);
	if (wait)
		cpu_clear(cpu, msg->call_struct.waitmask);
	else
		kfree(msg);
}

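/*
 * IPI entry point: acknowledge the hardware IPI, then drain this
 * CPU's message queue, dispatching on the message type.
 */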
static irqreturn_t ipi_handler(int irq, void *dev_instance)
{
	struct ipi_message *msg, *mg;
	struct ipi_message_queue *msg_queue;
	unsigned int cpu = smp_processor_id();

	platform_clear_ipi(cpu);

	msg_queue = &__get_cpu_var(ipi_msg_queue);
	msg_queue->count++;

	spin_lock(&msg_queue->lock);
	list_for_each_entry_safe(msg, mg, &msg_queue->head, list) {
		list_del(&msg->list);
		switch (msg->type) {
		case BFIN_IPI_RESCHEDULE:
			/* That's the easiest one; leave it to
			 * return_from_int. */
			kfree(msg);
			break;
		case BFIN_IPI_CALL_FUNC:
			ipi_call_function(cpu, msg);
			break;
		case BFIN_IPI_CPU_STOP:
			ipi_cpu_stop(cpu);
			kfree(msg);
			break;
		default:
			printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
			       cpu, msg->type);
			kfree(msg);
			break;
		}
	}
	spin_unlock(&msg_queue->lock);
	return IRQ_HANDLED;
}

static void ipi_queue_init(void)
{
	unsigned int cpu;
	struct ipi_message_queue *msg_queue;

	for_each_possible_cpu(cpu) {
		msg_queue = &per_cpu(ipi_msg_queue, cpu);
		INIT_LIST_HEAD(&msg_queue->head);
		spin_lock_init(&msg_queue->lock);
		msg_queue->count = 0;
	}
}

int smp_call_function(void (*func)(void *info), void *info, int wait)
{
	unsigned int cpu;
	cpumask_t callmap;
	unsigned long flags;
	struct ipi_message_queue *msg_queue;
	struct ipi_message *msg;

	callmap = cpu_online_map;
	cpu_clear(smp_processor_id(), callmap);
	if (cpus_empty(callmap))
		return 0;

	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;
	INIT_LIST_HEAD(&msg->list);
	msg->call_struct.func = func;
	msg->call_struct.info = info;
	msg->call_struct.wait = wait;
	msg->call_struct.pending = callmap;
	msg->call_struct.waitmask = callmap;
	msg->type = BFIN_IPI_CALL_FUNC;

	for_each_cpu_mask(cpu, callmap) {
		msg_queue = &per_cpu(ipi_msg_queue, cpu);
		spin_lock_irqsave(&msg_queue->lock, flags);
		list_add(&msg->list, &msg_queue->head);
		spin_unlock_irqrestore(&msg_queue->lock, flags);
		platform_send_ipi_cpu(cpu);
	}
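	/*
	 * The cores' L1 data caches are not coherent, so keep
	 * invalidating waitmask's cache line and re-reading it until
	 * every target CPU has cleared its bit in ipi_call_function().
	 */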
	if (wait) {
		while (!cpus_empty(msg->call_struct.waitmask))
			blackfin_dcache_invalidate_range(
				(unsigned long)(&msg->call_struct.waitmask),
				(unsigned long)(&msg->call_struct.waitmask) +
				sizeof(msg->call_struct.waitmask));
		kfree(msg);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(smp_call_function);

int smp_call_function_single(int cpuid, void (*func)(void *info), void *info,
				int wait)
{
	unsigned int cpu = cpuid;
	cpumask_t callmap;
	unsigned long flags;
	struct ipi_message_queue *msg_queue;
	struct ipi_message *msg;

	if (cpu_is_offline(cpu))
		return 0;
	cpus_clear(callmap);
	cpu_set(cpu, callmap);

	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;
	INIT_LIST_HEAD(&msg->list);
	msg->call_struct.func = func;
	msg->call_struct.info = info;
	msg->call_struct.wait = wait;
	msg->call_struct.pending = callmap;
	msg->call_struct.waitmask = callmap;
	msg->type = BFIN_IPI_CALL_FUNC;

	msg_queue = &per_cpu(ipi_msg_queue, cpu);
	spin_lock_irqsave(&msg_queue->lock, flags);
	list_add(&msg->list, &msg_queue->head);
	spin_unlock_irqrestore(&msg_queue->lock, flags);
	platform_send_ipi_cpu(cpu);

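	/* Same wait protocol as smp_call_function(): invalidate and
	 * re-read waitmask until the target CPU has cleared its bit. */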
	if (wait) {
		while (!cpus_empty(msg->call_struct.waitmask))
			blackfin_dcache_invalidate_range(
				(unsigned long)(&msg->call_struct.waitmask),
				(unsigned long)(&msg->call_struct.waitmask) +
				sizeof(msg->call_struct.waitmask));
		kfree(msg);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(smp_call_function_single);

void smp_send_reschedule(int cpu)
{
	unsigned long flags;
	struct ipi_message_queue *msg_queue;
	struct ipi_message *msg;

	if (cpu_is_offline(cpu))
		return;

	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return;
	memset(msg, 0, sizeof(*msg));
	INIT_LIST_HEAD(&msg->list);
	msg->type = BFIN_IPI_RESCHEDULE;

	msg_queue = &per_cpu(ipi_msg_queue, cpu);
	spin_lock_irqsave(&msg_queue->lock, flags);
	list_add(&msg->list, &msg_queue->head);
	spin_unlock_irqrestore(&msg_queue->lock, flags);
	platform_send_ipi_cpu(cpu);
}

void smp_send_stop(void)
{
	unsigned int cpu;
	cpumask_t callmap;
	unsigned long flags;
	struct ipi_message_queue *msg_queue;
	struct ipi_message *msg;

	callmap = cpu_online_map;
	cpu_clear(smp_processor_id(), callmap);
	if (cpus_empty(callmap))
		return;

	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return;
	memset(msg, 0, sizeof(*msg));
	INIT_LIST_HEAD(&msg->list);
	msg->type = BFIN_IPI_CPU_STOP;

	for_each_cpu_mask(cpu, callmap) {
		msg_queue = &per_cpu(ipi_msg_queue, cpu);
		spin_lock_irqsave(&msg_queue->lock, flags);
		list_add(&msg->list, &msg_queue->head);
		spin_unlock_irqrestore(&msg_queue->lock, flags);
		platform_send_ipi_cpu(cpu);
	}
}

int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	int ret;

	idle = fork_idle(cpu);
	if (IS_ERR(idle)) {
		printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
		return PTR_ERR(idle);
	}

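	/*
	 * Hand the idle thread's stack to the secondary core; its startup
	 * code picks secondary_stack up as the initial stack pointer once
	 * platform_boot_secondary() releases it.
	 */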
	secondary_stack = task_stack_page(idle) + THREAD_SIZE;
	smp_wmb();

	ret = platform_boot_secondary(cpu, idle);

	if (ret) {
		cpu_clear(cpu, cpu_present_map);
		printk(KERN_CRIT "CPU%u: processor failed to boot (%d)\n",
		       cpu, ret);
		free_task(idle);
	} else
		cpu_set(cpu, cpu_online_map);

	secondary_stack = NULL;

	return ret;
}

static void __cpuinit setup_secondary(unsigned int cpu)
{
#ifndef CONFIG_TICK_SOURCE_SYSTMR0
	struct irq_desc *timer_desc;
#endif
	unsigned long ilat;

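	/*
	 * Mask all interrupts, then acknowledge anything already latched
	 * in ILAT: with IMASK clear, writing the latched bits back to
	 * ILAT clears them.
	 */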
	bfin_write_IMASK(0);
	CSYNC();
	ilat = bfin_read_ILAT();
	CSYNC();
	bfin_write_ILAT(ilat);
	CSYNC();

	/* Reserve the PDA space for the secondary CPU. */
	reserve_pda();

	/* Enable interrupt levels IVG7-15. IARs have been already
	 * programmed by the boot CPU. */
	irq_flags |= IMASK_IVG15 |
		IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
		IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;

#ifdef CONFIG_TICK_SOURCE_SYSTMR0
	/* Power down the core timer, just to play safe. */
	bfin_write_TCNTL(0);

	/* System timer0 has been set up by CoreA. */
#else
	timer_desc = irq_desc + IRQ_CORETMR;
	setup_core_timer();
	timer_desc->chip->enable(IRQ_CORETMR);
#endif
}

void __cpuinit secondary_start_kernel(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	if (_bfin_swrst & SWRST_DBL_FAULT_B) {
		printk(KERN_EMERG "CoreB Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
		printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n",
			(int)init_saved_seqstat_coreb & SEQSTAT_EXCAUSE, init_saved_retx_coreb);
		printk(KERN_NOTICE "  DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr_coreb);
		printk(KERN_NOTICE "  ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr_coreb);
#endif
		printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
			init_retx_coreb);
	}

	/*
	 * We want the D-cache to be enabled early, in case the atomic
	 * support code emulates cache coherence (see
	 * __ARCH_SYNC_CORE_DCACHE).
	 */
	init_exception_vectors();

	bfin_setup_caches(cpu);

	local_irq_disable();

	/* Attach the new idle task to the global mm. */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	BUG_ON(current->mm);	/* Can't be, but better be safe than sorry. */

	preempt_disable();

	setup_secondary(cpu);

	local_irq_enable();

	platform_secondary_init(cpu);

	cpu_idle();
}

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	platform_prepare_cpus(max_cpus);
	ipi_queue_init();
	platform_request_ipi(&ipi_handler);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	unsigned int cpu;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

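	/*
	 * Conventional BogoMIPS arithmetic: loops_per_jiffy * HZ gives
	 * delay-loop iterations per second, and one BogoMIPS corresponds
	 * to 500,000 iterations per second; the second expression
	 * recovers the two decimal places.
	 */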
	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void smp_icache_flush_range_others(unsigned long start, unsigned long end)
{
	smp_flush_data.start = start;
	smp_flush_data.end = end;

	if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 1))
		printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n");
}
EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);

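/*
 * The two cores' L1 data caches are not kept coherent by hardware.
 * When __ARCH_SYNC_CORE_DCACHE is in effect, a core calls
 * resync_core_dcache() to discard its L1 data cache so that it
 * observes the other core's writes; the per-CPU counter records how
 * often this happens.
 */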
#ifdef __ARCH_SYNC_CORE_DCACHE
unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));

void resync_core_dcache(void)
{
	unsigned int cpu = get_cpu();

	blackfin_invalidate_entire_dcache();
	++per_cpu(cpu_data, cpu).dcache_invld_count;
	put_cpu();
}
EXPORT_SYMBOL(resync_core_dcache);
#endif