-rw-r--r--  arch/blackfin/include/asm/cpu.h              |  3
-rw-r--r--  arch/blackfin/include/asm/smp.h              |  5
-rw-r--r--  arch/blackfin/kernel/time-ts.c               | 26
-rw-r--r--  arch/blackfin/mach-bf561/include/mach/pll.h  |  4
-rw-r--r--  arch/blackfin/mach-bf561/smp.c               |  2
-rw-r--r--  arch/blackfin/mach-common/smp.c              | 53
6 files changed, 77 insertions(+), 16 deletions(-)
diff --git a/arch/blackfin/include/asm/cpu.h b/arch/blackfin/include/asm/cpu.h
index 05043786da21..e349631c8299 100644
--- a/arch/blackfin/include/asm/cpu.h
+++ b/arch/blackfin/include/asm/cpu.h
@@ -14,6 +14,9 @@ struct blackfin_cpudata {
 	struct cpu cpu;
 	unsigned int imemctl;
 	unsigned int dmemctl;
+#ifdef CONFIG_SMP
+	struct task_struct *idle;
+#endif
 };
 
 DECLARE_PER_CPU(struct blackfin_cpudata, cpu_data);
diff --git a/arch/blackfin/include/asm/smp.h b/arch/blackfin/include/asm/smp.h
index af6c0aa79bae..dc3d144b4bb5 100644
--- a/arch/blackfin/include/asm/smp.h
+++ b/arch/blackfin/include/asm/smp.h
@@ -37,7 +37,7 @@ extern unsigned long dcache_invld_count[NR_CPUS];
 #endif
 
 void smp_icache_flush_range_others(unsigned long start,
 				unsigned long end);
 #ifdef CONFIG_HOTPLUG_CPU
 void coreb_die(void);
 void cpu_die(void);
@@ -46,4 +46,7 @@ int __cpu_disable(void);
 int __cpu_die(unsigned int cpu);
 #endif
 
+void smp_timer_broadcast(const struct cpumask *mask);
+
+
 #endif /* !__ASM_BLACKFIN_SMP_H */
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index 1bcf3a3c57d8..6efd944a2f33 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -219,7 +219,7 @@ static void __init bfin_gptmr0_clockevent_init(struct clock_event_device *evt)
 
 #if defined(CONFIG_TICKSOURCE_CORETMR)
 /* per-cpu local core timer */
-static DEFINE_PER_CPU(struct clock_event_device, coretmr_events);
+DEFINE_PER_CPU(struct clock_event_device, coretmr_events);
 
 static int bfin_coretmr_set_next_event(unsigned long cycles,
 				struct clock_event_device *evt)
@@ -281,6 +281,25 @@ void bfin_coretmr_init(void)
 #ifdef CONFIG_CORE_TIMER_IRQ_L1
 __attribute__((l1_text))
 #endif
+
+static void broadcast_timer_set_mode(enum clock_event_mode mode,
+				struct clock_event_device *evt)
+{
+}
+
+static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
+{
+	evt->name = "dummy_timer";
+	evt->features = CLOCK_EVT_FEAT_ONESHOT |
+			CLOCK_EVT_FEAT_PERIODIC |
+			CLOCK_EVT_FEAT_DUMMY;
+	evt->rating = 400;
+	evt->mult = 1;
+	evt->set_mode = broadcast_timer_set_mode;
+
+	clockevents_register_device(evt);
+}
+
 irqreturn_t bfin_coretmr_interrupt(int irq, void *dev_id)
 {
 	int cpu = smp_processor_id();
@@ -306,6 +325,11 @@ void bfin_coretmr_clockevent_init(void)
 	unsigned int cpu = smp_processor_id();
 	struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);
 
+#ifdef CONFIG_SMP
+	evt->broadcast = smp_timer_broadcast;
+#endif
+
+
 	evt->name = "bfin_core_timer";
 	evt->rating = 350;
 	evt->irq = -1;
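
The time-ts.c changes above only wire up hooks: the per-cpu core timer gains a broadcast callback (evt->broadcast = smp_timer_broadcast) and a dummy, high-rated broadcast clockevent is registered for the tick layer. As a rough illustration of the resulting call path, here is a minimal user-space sketch, not kernel code; every model_*/fake name is invented for the example. When a tick has to be broadcast, the device's broadcast hook is invoked with a mask of target CPUs, and each target ends up running its own event_handler, which is what the ipi_timer() helper added in mach-common/smp.c further down does on real hardware.

/*
 * Hedged user-space model (not kernel code) of the broadcast path this
 * patch sets up.  model_clock_event, model_timer_broadcast and the loop
 * standing in for IPI delivery are invented for illustration only.
 */
#include <stdio.h>

#define NR_MODEL_CPUS 2

struct model_clock_event {
	const char *name;
	void (*event_handler)(struct model_clock_event *evt);
	void (*broadcast)(unsigned long mask);
};

static struct model_clock_event coretmr_model[NR_MODEL_CPUS];

/* Stands in for the per-cpu tick handler the real event_handler runs. */
static void model_tick(struct model_clock_event *evt)
{
	printf("%s: tick delivered\n", evt->name);
}

/* Stands in for smp_timer_broadcast(): "send" a timer IPI to each masked CPU. */
static void model_timer_broadcast(unsigned long mask)
{
	int cpu;

	for (cpu = 0; cpu < NR_MODEL_CPUS; cpu++)
		if (mask & (1UL << cpu))
			/* the IPI handler calls the target CPU's event_handler */
			coretmr_model[cpu].event_handler(&coretmr_model[cpu]);
}

int main(void)
{
	static char names[NR_MODEL_CPUS][32];
	int cpu;

	for (cpu = 0; cpu < NR_MODEL_CPUS; cpu++) {
		snprintf(names[cpu], sizeof(names[cpu]), "cpu%d_core_timer", cpu);
		coretmr_model[cpu].name = names[cpu];
		coretmr_model[cpu].event_handler = model_tick;
		coretmr_model[cpu].broadcast = model_timer_broadcast;
	}

	/* The core decides CPU1's local timer cannot fire: broadcast to it. */
	coretmr_model[0].broadcast(1UL << 1);
	return 0;
}
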
diff --git a/arch/blackfin/mach-bf561/include/mach/pll.h b/arch/blackfin/mach-bf561/include/mach/pll.h
index 7977db2f1c12..00bdacee9cc2 100644
--- a/arch/blackfin/mach-bf561/include/mach/pll.h
+++ b/arch/blackfin/mach-bf561/include/mach/pll.h
@@ -16,6 +16,7 @@
 #include <mach/irq.h>
 
 #define SUPPLE_0_WAKEUP ((IRQ_SUPPLE_0 - (IRQ_CORETMR + 1)) % 32)
+#define SUPPLE_1_WAKEUP ((IRQ_SUPPLE_1 - (IRQ_CORETMR + 1)) % 32)
 
 static inline void
 bfin_iwr_restore(unsigned long iwr0, unsigned long iwr1, unsigned long iwr2)
@@ -42,7 +43,8 @@ bfin_iwr_save(unsigned long niwr0, unsigned long niwr1, unsigned long niwr2,
 static inline void
 bfin_iwr_set_sup0(unsigned long *iwr0, unsigned long *iwr1, unsigned long *iwr2)
 {
-	bfin_iwr_save(0, IWR_ENABLE(SUPPLE_0_WAKEUP), 0, iwr0, iwr1, iwr2);
+	bfin_iwr_save(0, IWR_ENABLE(SUPPLE_0_WAKEUP) |
+		IWR_ENABLE(SUPPLE_1_WAKEUP), 0, iwr0, iwr1, iwr2);
 }
 
 #endif
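
The new SUPPLE_1_WAKEUP follows the same pattern as SUPPLE_0_WAKEUP: system IRQ numbers above IRQ_CORETMR map onto bits of the 32-bit SIC_IWRx wakeup registers, so the modulo gives the bit position within a register. A tiny stand-alone check of that arithmetic is sketched below; the IRQ numbers and the WAKEUP_BIT/IWR_BANK macro names are made up purely for illustration, the real values come from mach/irq.h.

/* Hedged arithmetic check of the *_WAKEUP macros; the IRQ numbers below
 * are invented for illustration, not the real BF561 values. */
#include <stdio.h>

#define FAKE_IRQ_CORETMR   6
#define FAKE_IRQ_SUPPLE_0 70	/* pretend supplemental IRQ 0 */
#define FAKE_IRQ_SUPPLE_1 71	/* pretend supplemental IRQ 1 */

#define WAKEUP_BIT(irq)   (((irq) - (FAKE_IRQ_CORETMR + 1)) % 32)
#define IWR_BANK(irq)     (((irq) - (FAKE_IRQ_CORETMR + 1)) / 32)

int main(void)
{
	printf("SUPPLE_0: IWR%d bit %d\n",
	       IWR_BANK(FAKE_IRQ_SUPPLE_0), WAKEUP_BIT(FAKE_IRQ_SUPPLE_0));
	printf("SUPPLE_1: IWR%d bit %d\n",
	       IWR_BANK(FAKE_IRQ_SUPPLE_1), WAKEUP_BIT(FAKE_IRQ_SUPPLE_1));
	return 0;
}
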
diff --git a/arch/blackfin/mach-bf561/smp.c b/arch/blackfin/mach-bf561/smp.c
index b2a6b77efad3..ab1c617b9cfc 100644
--- a/arch/blackfin/mach-bf561/smp.c
+++ b/arch/blackfin/mach-bf561/smp.c
@@ -84,7 +84,7 @@ int __cpuinit platform_boot_secondary(unsigned int cpu, struct task_struct *idle
 
 	if ((bfin_read_SYSCR() & COREB_SRAM_INIT) == 0) {
 		/* CoreB already running, sending ipi to wakeup it */
-		platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
+		smp_send_reschedule(cpu);
 	} else {
 		/* Kick CoreB, which should start execution from CORE_SRAM_BASE. */
 		bfin_write_SYSCR(bfin_read_SYSCR() & ~COREB_SRAM_INIT);
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 0784a52389c8..d3464053dfdb 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -14,6 +14,7 @@
 #include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/cache.h>
+#include <linux/clockchips.h>
 #include <linux/profile.h>
 #include <linux/errno.h>
 #include <linux/mm.h>
@@ -47,9 +48,10 @@ unsigned long blackfin_iflush_l1_entry[NR_CPUS];
 
 struct blackfin_initial_pda __cpuinitdata initial_pda_coreb;
 
-#define BFIN_IPI_RESCHEDULE   0
-#define BFIN_IPI_CALL_FUNC    1
-#define BFIN_IPI_CPU_STOP     2
+#define BFIN_IPI_TIMER        0
+#define BFIN_IPI_RESCHEDULE   1
+#define BFIN_IPI_CALL_FUNC    2
+#define BFIN_IPI_CPU_STOP     3
 
 struct blackfin_flush_data {
 	unsigned long start;
@@ -160,6 +162,14 @@ static irqreturn_t ipi_handler_int0(int irq, void *dev_instance)
 	return IRQ_HANDLED;
 }
 
+DECLARE_PER_CPU(struct clock_event_device, coretmr_events);
+void ipi_timer(void)
+{
+	int cpu = smp_processor_id();
+	struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);
+	evt->event_handler(evt);
+}
+
 static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
 {
 	struct ipi_message *msg;
@@ -176,6 +186,9 @@ static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
 	while (msg_queue->count) {
 		msg = &msg_queue->ipi_message[msg_queue->head];
 		switch (msg->type) {
+		case BFIN_IPI_TIMER:
+			ipi_timer();
+			break;
 		case BFIN_IPI_RESCHEDULE:
 			scheduler_ipi();
 			break;
@@ -297,8 +310,6 @@ void smp_send_reschedule(int cpu)
 {
 	cpumask_t callmap;
 	/* simply trigger an ipi */
-	if (cpu_is_offline(cpu))
-		return;
 
 	cpumask_clear(&callmap);
 	cpumask_set_cpu(cpu, &callmap);
@@ -308,6 +319,16 @@ void smp_send_reschedule(int cpu)
 	return;
 }
 
+void smp_send_msg(const struct cpumask *mask, unsigned long type)
+{
+	smp_send_message(*mask, type, NULL, NULL, 0);
+}
+
+void smp_timer_broadcast(const struct cpumask *mask)
+{
+	smp_send_msg(mask, BFIN_IPI_TIMER);
+}
+
 void smp_send_stop(void)
 {
 	cpumask_t callmap;
@@ -326,17 +347,24 @@ void smp_send_stop(void)
 int __cpuinit __cpu_up(unsigned int cpu)
 {
 	int ret;
-	static struct task_struct *idle;
+	struct blackfin_cpudata *ci = &per_cpu(cpu_data, cpu);
+	struct task_struct *idle = ci->idle;
 
-	if (idle)
+	if (idle) {
 		free_task(idle);
-
-	idle = fork_idle(cpu);
-	if (IS_ERR(idle)) {
-		printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
-		return PTR_ERR(idle);
+		idle = NULL;
 	}
 
+	if (!idle) {
+		idle = fork_idle(cpu);
+		if (IS_ERR(idle)) {
+			printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
+			return PTR_ERR(idle);
+		}
+		ci->idle = idle;
+	} else {
+		init_idle(idle, cpu);
+	}
 	secondary_stack = task_stack_page(idle) + THREAD_SIZE;
 
 	ret = platform_boot_secondary(cpu, idle);
@@ -411,6 +439,7 @@ void __cpuinit secondary_start_kernel(void)
 
 	bfin_setup_caches(cpu);
 
+	notify_cpu_starting(cpu);
 	/*
 	 * Calibrate loops per jiffy value.
 	 * IRQs need to be enabled here - D-cache can be invalidated
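
To close the loop on the mach-common/smp.c rework: the idle task for a secondary CPU now lives in per-cpu data (blackfin_cpudata.idle) instead of a function-local static. The following user-space sketch, not kernel code, mirrors the new __cpu_up() control flow as shown in the diff, where a previously cached idle task is freed and a fresh one forked on every online; struct model_cpudata, fake_fork_idle() and fake_free_task() are invented stand-ins for the real kernel helpers.

/* Hedged user-space model (not kernel code) of the reworked __cpu_up():
 * the idle task pointer moves from a local static into per-cpu data. */
#include <stdio.h>
#include <stdlib.h>

struct model_task { int cpu; };

struct model_cpudata {
	struct model_task *idle;	/* mirrors the new blackfin_cpudata.idle */
};

static struct model_cpudata cpu_data[2];

static struct model_task *fake_fork_idle(int cpu)
{
	struct model_task *t = malloc(sizeof(*t));
	if (t) {
		t->cpu = cpu;
		printf("cpu%d: forked idle task\n", cpu);
	}
	return t;
}

static void fake_free_task(struct model_task *t)
{
	printf("cpu%d: freed stale idle task\n", t->cpu);
	free(t);
}

static int model_cpu_up(int cpu)
{
	struct model_cpudata *ci = &cpu_data[cpu];
	struct model_task *idle = ci->idle;

	/* As in the patch: drop any idle task cached by a previous online. */
	if (idle) {
		fake_free_task(idle);
		idle = NULL;
	}

	/* Fork a fresh idle task and cache it in per-cpu data; the else
	 * branch mirrors the patch's init_idle() path for a reused task. */
	if (!idle) {
		idle = fake_fork_idle(cpu);
		if (!idle)
			return -1;
		ci->idle = idle;
	} else {
		printf("cpu%d: reusing cached idle task\n", cpu);
	}
	return 0;
}

int main(void)
{
	model_cpu_up(1);	/* first online: fork and cache */
	model_cpu_up(1);	/* re-online after hotplug: free stale, re-fork */
	return 0;
}
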