diff options
author | Max Filippov <jcmvbkbc@gmail.com> | 2013-10-16 18:42:28 -0400 |
---|---|---|
committer | Chris Zankel <chris@zankel.net> | 2014-01-14 13:19:59 -0500 |
commit | 49b424fedaf88d0fa9913082b8c1ccd012a8a972 (patch) | |
tree | 1a1fac57b578fe828b54b0367df4ed1fd94940b9 /arch/xtensa/kernel/smp.c | |
parent | f615136c06a791364f5afa8b8ba965315a6440f1 (diff) |
xtensa: implement CPU hotplug
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Chris Zankel <chris@zankel.net>
Diffstat (limited to 'arch/xtensa/kernel/smp.c')
-rw-r--r-- | arch/xtensa/kernel/smp.c | 128 |
1 files changed, 127 insertions, 1 deletions
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c index 46bdd142a07d..1c7a209795e8 100644 --- a/arch/xtensa/kernel/smp.c +++ b/arch/xtensa/kernel/smp.c | |||
@@ -40,6 +40,11 @@ | |||
40 | # endif | 40 | # endif |
41 | #endif | 41 | #endif |
42 | 42 | ||
43 | static void system_invalidate_dcache_range(unsigned long start, | ||
44 | unsigned long size); | ||
45 | static void system_flush_invalidate_dcache_range(unsigned long start, | ||
46 | unsigned long size); | ||
47 | |||
43 | /* IPI (Inter Process Interrupt) */ | 48 | /* IPI (Inter Process Interrupt) */ |
44 | 49 | ||
45 | #define IPI_IRQ 0 | 50 | #define IPI_IRQ 0 |
@@ -106,7 +111,7 @@ void __init smp_cpus_done(unsigned int max_cpus) | |||
106 | static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */ | 111 | static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */ |
107 | static DECLARE_COMPLETION(cpu_running); | 112 | static DECLARE_COMPLETION(cpu_running); |
108 | 113 | ||
109 | void __init secondary_start_kernel(void) | 114 | void secondary_start_kernel(void) |
110 | { | 115 | { |
111 | struct mm_struct *mm = &init_mm; | 116 | struct mm_struct *mm = &init_mm; |
112 | unsigned int cpu = smp_processor_id(); | 117 | unsigned int cpu = smp_processor_id(); |
@@ -174,6 +179,9 @@ static void mx_cpu_stop(void *p) | |||
174 | __func__, cpu, run_stall_mask, get_er(MPSCORE)); | 179 | __func__, cpu, run_stall_mask, get_er(MPSCORE)); |
175 | } | 180 | } |
176 | 181 | ||
182 | #ifdef CONFIG_HOTPLUG_CPU | ||
183 | unsigned long cpu_start_id __cacheline_aligned; | ||
184 | #endif | ||
177 | unsigned long cpu_start_ccount; | 185 | unsigned long cpu_start_ccount; |
178 | 186 | ||
179 | static int boot_secondary(unsigned int cpu, struct task_struct *ts) | 187 | static int boot_secondary(unsigned int cpu, struct task_struct *ts) |
@@ -182,6 +190,11 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts) | |||
182 | unsigned long ccount; | 190 | unsigned long ccount; |
183 | int i; | 191 | int i; |
184 | 192 | ||
193 | #ifdef CONFIG_HOTPLUG_CPU | ||
194 | cpu_start_id = cpu; | ||
195 | system_flush_invalidate_dcache_range( | ||
196 | (unsigned long)&cpu_start_id, sizeof(cpu_start_id)); | ||
197 | #endif | ||
185 | smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1); | 198 | smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1); |
186 | 199 | ||
187 | for (i = 0; i < 2; ++i) { | 200 | for (i = 0; i < 2; ++i) { |
@@ -234,6 +247,85 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) | |||
234 | return ret; | 247 | return ret; |
235 | } | 248 | } |
236 | 249 | ||
250 | #ifdef CONFIG_HOTPLUG_CPU | ||
251 | |||
252 | /* | ||
253 | * __cpu_disable runs on the processor to be shutdown. | ||
254 | */ | ||
/*
 * __cpu_disable runs on the processor to be shutdown.
 *
 * Detaches this CPU from the scheduler, interrupt routing and the MM
 * layer.  Returns 0 on success; after this returns the caller is
 * expected to park the CPU (see cpu_die()).
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	local_flush_cache_all();
	local_flush_tlb_all();
	invalidate_page_directory();

	/* Drop this CPU from the mm_cpumask of every task. */
	clear_tasks_mm_cpumask(cpu);

	return 0;
}
282 | |||
/*
 * Stop @cpu from CPU0.  mx_cpu_stop pokes the MX core-control
 * registers (RunStall mask / MPSCORE), so it is cross-called on
 * CPU0 rather than run locally; the call waits for completion.
 * The target cpu number is smuggled through the void* argument,
 * matching the (void *)cpu convention used by boot_secondary().
 */
static void platform_cpu_kill(unsigned int cpu)
{
	smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true);
}
287 | |||
288 | /* | ||
289 | * called on the thread which is asking for a CPU to be shutdown - | ||
290 | * waits until shutdown has completed, or it is timed out. | ||
291 | */ | ||
/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	while (time_before(jiffies, timeout)) {
		/*
		 * The dying CPU may update cpu_start_id while running
		 * with its caches in an unknown state, so throw away
		 * any stale cached copy before reading it.
		 */
		system_invalidate_dcache_range((unsigned long)&cpu_start_id,
				sizeof(cpu_start_id));
		/*
		 * NOTE(review): the negated-id handshake is presumably
		 * written by the dying CPU in cpu_restart (head.S) to
		 * signal it has parked — confirm there.  Also consider
		 * whether a cpu_relax() belongs in this loop; as written
		 * it spins IPI-flushing at full speed for up to 1s.
		 */
		if (cpu_start_id == -cpu) {
			/* CPU has parked; stall it via the MX registers. */
			platform_cpu_kill(cpu);
			return;
		}
	}
	pr_err("CPU%u: unable to kill\n", cpu);
}
305 | |||
/*
 * Idle-loop hook: once this CPU has been offlined the idle thread
 * lands here and never returns (cpu_die() jumps to cpu_restart).
 */
void arch_cpu_idle_dead(void)
{
	cpu_die();
}
310 | /* | ||
311 | * Called from the idle thread for the CPU which has been shutdown. | ||
312 | * | ||
313 | * Note that we disable IRQs here, but do not re-enable them | ||
314 | * before returning to the caller. This is also the behaviour | ||
315 | * of the other hotplug-cpu capable cores, so presumably coming | ||
316 | * out of idle fixes this. | ||
317 | */ | ||
/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	idle_task_exit();
	local_irq_disable();
	/*
	 * Jump to cpu_restart (defined outside this file, presumably
	 * in the low-level startup code) and never return; __ref above
	 * permits the reference across section boundaries.
	 */
	__asm__ __volatile__(
			" movi a2, cpu_restart\n"
			" jx a2\n");
}
326 | |||
327 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
328 | |||
237 | enum ipi_msg_type { | 329 | enum ipi_msg_type { |
238 | IPI_RESCHEDULE = 0, | 330 | IPI_RESCHEDULE = 0, |
239 | IPI_CALL_FUNC, | 331 | IPI_CALL_FUNC, |
@@ -463,3 +555,37 @@ void flush_icache_range(unsigned long start, unsigned long end) | |||
463 | }; | 555 | }; |
464 | on_each_cpu(ipi_flush_icache_range, &fd, 1); | 556 | on_each_cpu(ipi_flush_icache_range, &fd, 1); |
465 | } | 557 | } |
558 | |||
559 | /* ------------------------------------------------------------------------- */ | ||
560 | |||
561 | static void ipi_invalidate_dcache_range(void *arg) | ||
562 | { | ||
563 | struct flush_data *fd = arg; | ||
564 | __invalidate_dcache_range(fd->addr1, fd->addr2); | ||
565 | } | ||
566 | |||
567 | static void system_invalidate_dcache_range(unsigned long start, | ||
568 | unsigned long size) | ||
569 | { | ||
570 | struct flush_data fd = { | ||
571 | .addr1 = start, | ||
572 | .addr2 = size, | ||
573 | }; | ||
574 | on_each_cpu(ipi_invalidate_dcache_range, &fd, 1); | ||
575 | } | ||
576 | |||
577 | static void ipi_flush_invalidate_dcache_range(void *arg) | ||
578 | { | ||
579 | struct flush_data *fd = arg; | ||
580 | __flush_invalidate_dcache_range(fd->addr1, fd->addr2); | ||
581 | } | ||
582 | |||
583 | static void system_flush_invalidate_dcache_range(unsigned long start, | ||
584 | unsigned long size) | ||
585 | { | ||
586 | struct flush_data fd = { | ||
587 | .addr1 = start, | ||
588 | .addr2 = size, | ||
589 | }; | ||
590 | on_each_cpu(ipi_flush_invalidate_dcache_range, &fd, 1); | ||
591 | } | ||