author     Max Filippov <jcmvbkbc@gmail.com>    2013-10-16 18:42:28 -0400
committer  Chris Zankel <chris@zankel.net>      2014-01-14 13:19:59 -0500
commit     49b424fedaf88d0fa9913082b8c1ccd012a8a972 (patch)
tree       1a1fac57b578fe828b54b0367df4ed1fd94940b9 /arch
parent     f615136c06a791364f5afa8b8ba965315a6440f1 (diff)
xtensa: implement CPU hotplug
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Chris Zankel <chris@zankel.net>
Diffstat (limited to 'arch')
-rw-r--r--  arch/xtensa/Kconfig            |   9
-rw-r--r--  arch/xtensa/include/asm/irq.h  |   1
-rw-r--r--  arch/xtensa/include/asm/smp.h  |   9
-rw-r--r--  arch/xtensa/kernel/head.S      |  51
-rw-r--r--  arch/xtensa/kernel/irq.c       |  49
-rw-r--r--  arch/xtensa/kernel/setup.c     |   1
-rw-r--r--  arch/xtensa/kernel/smp.c       | 128
-rw-r--r--  arch/xtensa/kernel/traps.c     |   4
8 files changed, 248 insertions(+), 4 deletions(-)
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 4b09c60b6b30..70a160be3464 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -140,6 +140,15 @@ config NR_CPUS
 	range 2 32
 	default "4"
 
+config HOTPLUG_CPU
+	bool "Enable CPU hotplug support"
+	depends on SMP
+	help
+	  Say Y here to allow turning CPUs off and on. CPUs can be
+	  controlled through /sys/devices/system/cpu.
+
+	  Say N if you want to disable CPU hotplug.
+
 config MATH_EMULATION
 	bool "Math emulation"
 	help
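
The help text added above points at the generic sysfs CPU control files. As an illustration only (not part of this patch), a secondary CPU can be taken down and brought back from userspace by writing to its 'online' attribute; the sketch below assumes a cpu1 that was registered as hotpluggable (see the setup.c hunk further down, where cpu->hotpluggable is set for every CPU except CPU0) and a kernel built with CONFIG_HOTPLUG_CPU=y:

/* Illustrative userspace sketch, not part of the patch. */
#include <stdio.h>

static int set_cpu_online(int cpu, int online)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/online", cpu);
	f = fopen(path, "w");	/* needs root; cpu0 exposes no 'online' file */
	if (!f)
		return -1;
	fputs(online ? "1\n" : "0\n", f);
	fclose(f);
	return 0;
}

int main(void)
{
	set_cpu_online(1, 0);	/* ends up in __cpu_disable()/__cpu_die() below */
	set_cpu_online(1, 1);	/* re-enters boot_secondary()/_startup */
	return 0;
}
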
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h
index 7d194d462150..f71f88ea7646 100644
--- a/arch/xtensa/include/asm/irq.h
+++ b/arch/xtensa/include/asm/irq.h
@@ -45,6 +45,7 @@ static __inline__ int irq_canonicalize(int irq)
 struct irqaction;
 struct irq_domain;
 
+void migrate_irqs(void);
 int xtensa_irq_domain_xlate(const u32 *intspec, unsigned int intsize,
 		unsigned long int_irq, unsigned long ext_irq,
 		unsigned long *out_hwirq, unsigned int *out_type);
diff --git a/arch/xtensa/include/asm/smp.h b/arch/xtensa/include/asm/smp.h
index 30ac58cc70df..4e43f5643891 100644
--- a/arch/xtensa/include/asm/smp.h
+++ b/arch/xtensa/include/asm/smp.h
@@ -29,6 +29,15 @@ void ipi_init(void);
 struct seq_file;
 void show_ipi_list(struct seq_file *p, int prec);
 
+#ifdef CONFIG_HOTPLUG_CPU
+
+void __cpu_die(unsigned int cpu);
+int __cpu_disable(void);
+void cpu_die(void);
+void cpu_restart(void);
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
 #endif /* CONFIG_SMP */
 
 #endif /* _XTENSA_SMP_H */
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index 74ec62c892bc..aeeb3cc8a410 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -103,7 +103,7 @@ _SetupMMU:
 
 ENDPROC(_start)
 
-	__INIT
+	__REF
 	.literal_position
 
 ENTRY(_startup)
@@ -302,6 +302,55 @@ should_never_return:
 
 ENDPROC(_startup)
 
+#ifdef CONFIG_HOTPLUG_CPU
+
+ENTRY(cpu_restart)
+
+#if XCHAL_DCACHE_IS_WRITEBACK
+	___flush_invalidate_dcache_all a2 a3
+#else
+	___invalidate_dcache_all a2 a3
+#endif
+	memw
+	movi	a2, CCON	# MX External Register to Configure Cache
+	movi	a3, 0
+	wer	a3, a2
+	extw
+
+	rsr	a0, prid
+	neg	a2, a0
+	movi	a3, cpu_start_id
+	s32i	a2, a3, 0
+#if XCHAL_DCACHE_IS_WRITEBACK
+	dhwbi	a3, 0
+#endif
+1:
+	l32i	a2, a3, 0
+	dhi	a3, 0
+	bne	a2, a0, 1b
+
+	/*
+	 * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions).
+	 * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow
+	 * xt-gdb to single step via DEBUG exceptions received directly
+	 * by ocd.
+	 */
+	movi	a1, 1
+	movi	a0, 0
+	wsr	a1, windowstart
+	wsr	a0, windowbase
+	rsync
+
+	movi	a1, LOCKLEVEL
+	wsr	a1, ps
+	rsync
+
+	j	_startup
+
+ENDPROC(cpu_restart)
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
 /*
  * DATA section
  */
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index fad9e0059765..482868a2de6e 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -153,3 +153,52 @@ void __init init_IRQ(void)
 #endif
 	variant_init_irq();
 }
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_chip *chip = irq_data_get_irq_chip(data);
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+	if (chip->irq_set_affinity)
+		chip->irq_set_affinity(data, cpumask_of(cpu), false);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+/*
+ * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
+ * the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ */
+void migrate_irqs(void)
+{
+	unsigned int i, cpu = smp_processor_id();
+	struct irq_desc *desc;
+
+	for_each_irq_desc(i, desc) {
+		struct irq_data *data = irq_desc_get_irq_data(desc);
+		unsigned int newcpu;
+
+		if (irqd_is_per_cpu(data))
+			continue;
+
+		if (!cpumask_test_cpu(cpu, data->affinity))
+			continue;
+
+		newcpu = cpumask_any_and(data->affinity, cpu_online_mask);
+
+		if (newcpu >= nr_cpu_ids) {
+			pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
+					    i, cpu);
+
+			cpumask_setall(data->affinity);
+			newcpu = cpumask_any_and(data->affinity,
+						 cpu_online_mask);
+		}
+
+		route_irq(data, i, newcpu);
+	}
+}
+#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index dfd8f52c05d8..d21bfa7a28e0 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -527,6 +527,7 @@ static int __init topology_init(void)
 
 	for_each_possible_cpu(i) {
 		struct cpu *cpu = &per_cpu(cpu_data, i);
+		cpu->hotpluggable = !!i;
 		register_cpu(cpu, i);
 	}
 
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index 46bdd142a07d..1c7a209795e8 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -40,6 +40,11 @@
 # endif
 #endif
 
+static void system_invalidate_dcache_range(unsigned long start,
+		unsigned long size);
+static void system_flush_invalidate_dcache_range(unsigned long start,
+		unsigned long size);
+
 /* IPI (Inter Process Interrupt) */
 
 #define IPI_IRQ 0
@@ -106,7 +111,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */
 static DECLARE_COMPLETION(cpu_running);
 
-void __init secondary_start_kernel(void)
+void secondary_start_kernel(void)
 {
 	struct mm_struct *mm = &init_mm;
 	unsigned int cpu = smp_processor_id();
@@ -174,6 +179,9 @@ static void mx_cpu_stop(void *p)
 		__func__, cpu, run_stall_mask, get_er(MPSCORE));
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+unsigned long cpu_start_id __cacheline_aligned;
+#endif
 unsigned long cpu_start_ccount;
 
 static int boot_secondary(unsigned int cpu, struct task_struct *ts)
@@ -182,6 +190,11 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
 	unsigned long ccount;
 	int i;
 
+#ifdef CONFIG_HOTPLUG_CPU
+	cpu_start_id = cpu;
+	system_flush_invalidate_dcache_range(
+			(unsigned long)&cpu_start_id, sizeof(cpu_start_id));
+#endif
 	smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
 
 	for (i = 0; i < 2; ++i) {
@@ -234,6 +247,85 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 	return ret;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+int __cpu_disable(void)
+{
+	unsigned int cpu = smp_processor_id();
+
+	/*
+	 * Take this CPU offline.  Once we clear this, we can't return,
+	 * and we must not schedule until we're ready to give up the cpu.
+	 */
+	set_cpu_online(cpu, false);
+
+	/*
+	 * OK - migrate IRQs away from this CPU
+	 */
+	migrate_irqs();
+
+	/*
+	 * Flush user cache and TLB mappings, and then remove this CPU
+	 * from the vm mask set of all processes.
+	 */
+	local_flush_cache_all();
+	local_flush_tlb_all();
+	invalidate_page_directory();
+
+	clear_tasks_mm_cpumask(cpu);
+
+	return 0;
+}
+
+static void platform_cpu_kill(unsigned int cpu)
+{
+	smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true);
+}
+
+/*
+ * called on the thread which is asking for a CPU to be shutdown -
+ * waits until shutdown has completed, or it is timed out.
+ */
+void __cpu_die(unsigned int cpu)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+	while (time_before(jiffies, timeout)) {
+		system_invalidate_dcache_range((unsigned long)&cpu_start_id,
+				sizeof(cpu_start_id));
+		if (cpu_start_id == -cpu) {
+			platform_cpu_kill(cpu);
+			return;
+		}
+	}
+	pr_err("CPU%u: unable to kill\n", cpu);
+}
+
+void arch_cpu_idle_dead(void)
+{
+	cpu_die();
+}
+/*
+ * Called from the idle thread for the CPU which has been shutdown.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller. This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void __ref cpu_die(void)
+{
+	idle_task_exit();
+	local_irq_disable();
+	__asm__ __volatile__(
+			"	movi	a2, cpu_restart\n"
+			"	jx	a2\n");
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
 enum ipi_msg_type {
 	IPI_RESCHEDULE = 0,
 	IPI_CALL_FUNC,
@@ -463,3 +555,37 @@ void flush_icache_range(unsigned long start, unsigned long end)
 	};
 	on_each_cpu(ipi_flush_icache_range, &fd, 1);
 }
+
+/* ------------------------------------------------------------------------- */
+
+static void ipi_invalidate_dcache_range(void *arg)
+{
+	struct flush_data *fd = arg;
+	__invalidate_dcache_range(fd->addr1, fd->addr2);
+}
+
+static void system_invalidate_dcache_range(unsigned long start,
+		unsigned long size)
+{
+	struct flush_data fd = {
+		.addr1 = start,
+		.addr2 = size,
+	};
+	on_each_cpu(ipi_invalidate_dcache_range, &fd, 1);
+}
+
+static void ipi_flush_invalidate_dcache_range(void *arg)
+{
+	struct flush_data *fd = arg;
+	__flush_invalidate_dcache_range(fd->addr1, fd->addr2);
+}
+
+static void system_flush_invalidate_dcache_range(unsigned long start,
+		unsigned long size)
+{
+	struct flush_data fd = {
+		.addr1 = start,
+		.addr2 = size,
+	};
+	on_each_cpu(ipi_flush_invalidate_dcache_range, &fd, 1);
+}
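
Taken together, __cpu_die() above and cpu_restart in the head.S hunk implement a small handshake through cpu_start_id. The sketch below is a C rendering of that protocol for readability only; the names are hypothetical, and the dcache flush/invalidate calls that the real code needs around every access are omitted:

/*
 * Illustrative sketch, not part of the patch: the cpu_start_id handshake.
 * The dying CPU publishes the negated id to report that it has parked,
 * then waits for boot_secondary() to store the positive id before it
 * jumps back to _startup.
 */
static volatile long cpu_start_id_model;

/* role of cpu_restart (head.S), running on the CPU being shut down */
static void parked_cpu_loop(long my_id)
{
	cpu_start_id_model = -my_id;		/* "I am down" */
	while (cpu_start_id_model != my_id)	/* "bring me back up" */
		;
	/* real code: j _startup */
}

/* role of __cpu_die() (smp.c), running on the CPU requesting shutdown */
static void wait_for_cpu_parked(long id)
{
	/* real code bounds this loop with a one second jiffies timeout */
	while (cpu_start_id_model != -id)
		;
	/* then platform_cpu_kill(id) stalls the core via mx_cpu_stop() */
}
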
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 3c0ff5746fe2..eebbfd8c26fc 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -332,7 +332,7 @@ void * __init trap_set_handler(int cause, void *handler)
 }
 
 
-static void __init trap_init_excsave(void)
+static void trap_init_excsave(void)
 {
 	unsigned long excsave1 = (unsigned long)this_cpu_ptr(exc_table);
 	__asm__ __volatile__("wsr %0, excsave1\n" : : "a" (excsave1));
@@ -384,7 +384,7 @@ void __init trap_init(void)
 }
 
 #ifdef CONFIG_SMP
-void __init secondary_trap_init(void)
+void secondary_trap_init(void)
 {
 	trap_init_excsave();
 }