diff options
author | Paul Mundt <lethal@linux-sh.org> | 2010-04-26 06:08:55 -0400 |
---|---|---|
committer | Paul Mundt <lethal@linux-sh.org> | 2010-04-26 06:08:55 -0400 |
commit | 763142d1efb56effe614d71185781796c4b83c78 (patch) | |
tree | f886c239786fd4be028e3a45006c5cc5c1b3a3f2 | |
parent | 8db2bc4559639680a94d4492ae4b7ce71298a74f (diff) |
sh: CPU hotplug support.
This adds preliminary support for CPU hotplug for SH SMP systems.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
-rw-r--r-- | arch/sh/Kconfig | 7 | ||||
-rw-r--r-- | arch/sh/include/asm/irq.h | 3 | ||||
-rw-r--r-- | arch/sh/include/asm/smp-ops.h | 16 | ||||
-rw-r--r-- | arch/sh/include/asm/smp.h | 19 | ||||
-rw-r--r-- | arch/sh/kernel/idle.c | 6 | ||||
-rw-r--r-- | arch/sh/kernel/irq.c | 42 | ||||
-rw-r--r-- | arch/sh/kernel/localtimer.c | 4 | ||||
-rw-r--r-- | arch/sh/kernel/smp.c | 103 |
8 files changed, 194 insertions(+), 6 deletions(-)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index ba86bfba95ac..d73bd4db5e84 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
@@ -706,6 +706,13 @@ config NR_CPUS | |||
706 | This is purely to save memory - each supported CPU adds | 706 | This is purely to save memory - each supported CPU adds |
707 | approximately eight kilobytes to the kernel image. | 707 | approximately eight kilobytes to the kernel image. |
708 | 708 | ||
709 | config HOTPLUG_CPU | ||
710 | bool "Support for hot-pluggable CPUs (EXPERIMENTAL)" | ||
711 | depends on SMP && HOTPLUG && EXPERIMENTAL | ||
712 | help | ||
713 | Say Y here to experiment with turning CPUs off and on. CPUs | ||
714 | can be controlled through /sys/devices/system/cpu. | ||
715 | |||
709 | source "kernel/Kconfig.preempt" | 716 | source "kernel/Kconfig.preempt" |
710 | 717 | ||
711 | config GUSA | 718 | config GUSA |
diff --git a/arch/sh/include/asm/irq.h b/arch/sh/include/asm/irq.h index 99c593b3a827..02c2f0102cfa 100644 --- a/arch/sh/include/asm/irq.h +++ b/arch/sh/include/asm/irq.h | |||
@@ -1,6 +1,7 @@ | |||
1 | #ifndef __ASM_SH_IRQ_H | 1 | #ifndef __ASM_SH_IRQ_H |
2 | #define __ASM_SH_IRQ_H | 2 | #define __ASM_SH_IRQ_H |
3 | 3 | ||
4 | #include <linux/cpumask.h> | ||
4 | #include <asm/machvec.h> | 5 | #include <asm/machvec.h> |
5 | 6 | ||
6 | /* | 7 | /* |
@@ -50,6 +51,8 @@ static inline int generic_irq_demux(int irq) | |||
50 | #define irq_demux(irq) sh_mv.mv_irq_demux(irq) | 51 | #define irq_demux(irq) sh_mv.mv_irq_demux(irq) |
51 | 52 | ||
52 | void init_IRQ(void); | 53 | void init_IRQ(void); |
54 | void migrate_irqs(void); | ||
55 | |||
53 | asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs); | 56 | asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs); |
54 | 57 | ||
55 | #ifdef CONFIG_IRQSTACKS | 58 | #ifdef CONFIG_IRQSTACKS |
diff --git a/arch/sh/include/asm/smp-ops.h b/arch/sh/include/asm/smp-ops.h index 0581b2a4c8ce..c590f76856f1 100644 --- a/arch/sh/include/asm/smp-ops.h +++ b/arch/sh/include/asm/smp-ops.h | |||
@@ -7,20 +7,27 @@ struct plat_smp_ops { | |||
7 | void (*prepare_cpus)(unsigned int max_cpus); | 7 | void (*prepare_cpus)(unsigned int max_cpus); |
8 | void (*start_cpu)(unsigned int cpu, unsigned long entry_point); | 8 | void (*start_cpu)(unsigned int cpu, unsigned long entry_point); |
9 | void (*send_ipi)(unsigned int cpu, unsigned int message); | 9 | void (*send_ipi)(unsigned int cpu, unsigned int message); |
10 | int (*cpu_disable)(unsigned int cpu); | ||
11 | void (*cpu_die)(unsigned int cpu); | ||
12 | void (*play_dead)(void); | ||
10 | }; | 13 | }; |
11 | 14 | ||
15 | extern struct plat_smp_ops *mp_ops; | ||
12 | extern struct plat_smp_ops shx3_smp_ops; | 16 | extern struct plat_smp_ops shx3_smp_ops; |
13 | 17 | ||
14 | #ifdef CONFIG_SMP | 18 | #ifdef CONFIG_SMP |
15 | 19 | ||
16 | static inline void plat_smp_setup(void) | 20 | static inline void plat_smp_setup(void) |
17 | { | 21 | { |
18 | extern struct plat_smp_ops *mp_ops; /* private */ | ||
19 | |||
20 | BUG_ON(!mp_ops); | 22 | BUG_ON(!mp_ops); |
21 | mp_ops->smp_setup(); | 23 | mp_ops->smp_setup(); |
22 | } | 24 | } |
23 | 25 | ||
26 | static inline void play_dead(void) | ||
27 | { | ||
28 | mp_ops->play_dead(); | ||
29 | } | ||
30 | |||
24 | extern void register_smp_ops(struct plat_smp_ops *ops); | 31 | extern void register_smp_ops(struct plat_smp_ops *ops); |
25 | 32 | ||
26 | #else | 33 | #else |
@@ -34,6 +41,11 @@ static inline void register_smp_ops(struct plat_smp_ops *ops) | |||
34 | { | 41 | { |
35 | } | 42 | } |
36 | 43 | ||
44 | static inline void play_dead(void) | ||
45 | { | ||
46 | BUG(); | ||
47 | } | ||
48 | |||
37 | #endif /* CONFIG_SMP */ | 49 | #endif /* CONFIG_SMP */ |
38 | 50 | ||
39 | #endif /* __ASM_SH_SMP_OPS_H */ | 51 | #endif /* __ASM_SH_SMP_OPS_H */ |
diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h index da5135b2579e..9070d943ddde 100644 --- a/arch/sh/include/asm/smp.h +++ b/arch/sh/include/asm/smp.h | |||
@@ -38,9 +38,26 @@ void smp_timer_broadcast(const struct cpumask *mask); | |||
38 | 38 | ||
39 | void local_timer_interrupt(void); | 39 | void local_timer_interrupt(void); |
40 | void local_timer_setup(unsigned int cpu); | 40 | void local_timer_setup(unsigned int cpu); |
41 | void local_timer_stop(unsigned int cpu); | ||
41 | 42 | ||
42 | void arch_send_call_function_single_ipi(int cpu); | 43 | void arch_send_call_function_single_ipi(int cpu); |
43 | extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); | 44 | void arch_send_call_function_ipi_mask(const struct cpumask *mask); |
45 | |||
46 | void native_play_dead(void); | ||
47 | void native_cpu_die(unsigned int cpu); | ||
48 | int native_cpu_disable(unsigned int cpu); | ||
49 | |||
50 | #ifdef CONFIG_HOTPLUG_CPU | ||
51 | void play_dead_common(void); | ||
52 | extern int __cpu_disable(void); | ||
53 | |||
54 | static inline void __cpu_die(unsigned int cpu) | ||
55 | { | ||
56 | extern struct plat_smp_ops *mp_ops; /* private */ | ||
57 | |||
58 | mp_ops->cpu_die(cpu); | ||
59 | } | ||
60 | #endif | ||
44 | 61 | ||
45 | static inline int hard_smp_processor_id(void) | 62 | static inline int hard_smp_processor_id(void) |
46 | { | 63 | { |
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c index 204005329fe1..425d604e3a28 100644 --- a/arch/sh/kernel/idle.c +++ b/arch/sh/kernel/idle.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <asm/pgalloc.h> | 19 | #include <asm/pgalloc.h> |
20 | #include <asm/system.h> | 20 | #include <asm/system.h> |
21 | #include <asm/atomic.h> | 21 | #include <asm/atomic.h> |
22 | #include <asm/smp.h> | ||
22 | 23 | ||
23 | void (*pm_idle)(void) = NULL; | 24 | void (*pm_idle)(void) = NULL; |
24 | 25 | ||
@@ -89,10 +90,13 @@ void cpu_idle(void) | |||
89 | while (1) { | 90 | while (1) { |
90 | tick_nohz_stop_sched_tick(1); | 91 | tick_nohz_stop_sched_tick(1); |
91 | 92 | ||
92 | while (!need_resched() && cpu_online(cpu)) { | 93 | while (!need_resched()) { |
93 | check_pgt_cache(); | 94 | check_pgt_cache(); |
94 | rmb(); | 95 | rmb(); |
95 | 96 | ||
97 | if (cpu_is_offline(cpu)) | ||
98 | play_dead(); | ||
99 | |||
96 | local_irq_disable(); | 100 | local_irq_disable(); |
97 | /* Don't trace irqs off for idle */ | 101 | /* Don't trace irqs off for idle */ |
98 | stop_critical_timings(); | 102 | stop_critical_timings(); |
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index f6a9319c28e2..257de1f0692b 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/kernel_stat.h> | 12 | #include <linux/kernel_stat.h> |
13 | #include <linux/seq_file.h> | 13 | #include <linux/seq_file.h> |
14 | #include <linux/ftrace.h> | 14 | #include <linux/ftrace.h> |
15 | #include <linux/delay.h> | ||
15 | #include <asm/processor.h> | 16 | #include <asm/processor.h> |
16 | #include <asm/machvec.h> | 17 | #include <asm/machvec.h> |
17 | #include <asm/uaccess.h> | 18 | #include <asm/uaccess.h> |
@@ -292,3 +293,44 @@ int __init arch_probe_nr_irqs(void) | |||
292 | return 0; | 293 | return 0; |
293 | } | 294 | } |
294 | #endif | 295 | #endif |
296 | |||
297 | #ifdef CONFIG_HOTPLUG_CPU | ||
298 | static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu) | ||
299 | { | ||
300 | printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n", | ||
301 | irq, desc->node, cpu); | ||
302 | |||
303 | raw_spin_lock_irq(&desc->lock); | ||
304 | desc->chip->set_affinity(irq, cpumask_of(cpu)); | ||
305 | raw_spin_unlock_irq(&desc->lock); | ||
306 | } | ||
307 | |||
308 | /* | ||
309 | * The CPU has been marked offline. Migrate IRQs off this CPU. If | ||
310 | * the affinity settings do not allow other CPUs, force them onto any | ||
311 | * available CPU. | ||
312 | */ | ||
313 | void migrate_irqs(void) | ||
314 | { | ||
315 | struct irq_desc *desc; | ||
316 | unsigned int irq, cpu = smp_processor_id(); | ||
317 | |||
318 | for_each_irq_desc(irq, desc) { | ||
319 | if (desc->node == cpu) { | ||
320 | unsigned int newcpu = cpumask_any_and(desc->affinity, | ||
321 | cpu_online_mask); | ||
322 | if (newcpu >= nr_cpu_ids) { | ||
323 | if (printk_ratelimit()) | ||
324 | printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n", | ||
325 | irq, cpu); | ||
326 | |||
327 | cpumask_setall(desc->affinity); | ||
328 | newcpu = cpumask_any_and(desc->affinity, | ||
329 | cpu_online_mask); | ||
330 | } | ||
331 | |||
332 | route_irq(desc, irq, newcpu); | ||
333 | } | ||
334 | } | ||
335 | } | ||
336 | #endif | ||
diff --git a/arch/sh/kernel/localtimer.c b/arch/sh/kernel/localtimer.c index 865a2f1029b1..8bfc6dfa8b94 100644 --- a/arch/sh/kernel/localtimer.c +++ b/arch/sh/kernel/localtimer.c | |||
@@ -60,3 +60,7 @@ void local_timer_setup(unsigned int cpu) | |||
60 | 60 | ||
61 | clockevents_register_device(clk); | 61 | clockevents_register_device(clk); |
62 | } | 62 | } |
63 | |||
64 | void local_timer_stop(unsigned int cpu) | ||
65 | { | ||
66 | } | ||
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index 21e7f8a9f3e4..86cd6f94b53b 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c | |||
@@ -79,6 +79,105 @@ void __init smp_prepare_boot_cpu(void) | |||
79 | per_cpu(cpu_state, cpu) = CPU_ONLINE; | 79 | per_cpu(cpu_state, cpu) = CPU_ONLINE; |
80 | } | 80 | } |
81 | 81 | ||
82 | #ifdef CONFIG_HOTPLUG_CPU | ||
83 | void native_cpu_die(unsigned int cpu) | ||
84 | { | ||
85 | unsigned int i; | ||
86 | |||
87 | for (i = 0; i < 10; i++) { | ||
88 | smp_rmb(); | ||
89 | if (per_cpu(cpu_state, cpu) == CPU_DEAD) { | ||
90 | if (system_state == SYSTEM_RUNNING) | ||
91 | pr_info("CPU %u is now offline\n", cpu); | ||
92 | |||
93 | return; | ||
94 | } | ||
95 | |||
96 | msleep(100); | ||
97 | } | ||
98 | |||
99 | pr_err("CPU %u didn't die...\n", cpu); | ||
100 | } | ||
101 | |||
102 | int native_cpu_disable(unsigned int cpu) | ||
103 | { | ||
104 | return cpu == 0 ? -EPERM : 0; | ||
105 | } | ||
106 | |||
107 | void play_dead_common(void) | ||
108 | { | ||
109 | idle_task_exit(); | ||
110 | irq_ctx_exit(raw_smp_processor_id()); | ||
111 | mb(); | ||
112 | |||
113 | __get_cpu_var(cpu_state) = CPU_DEAD; | ||
114 | local_irq_disable(); | ||
115 | } | ||
116 | |||
117 | void native_play_dead(void) | ||
118 | { | ||
119 | play_dead_common(); | ||
120 | } | ||
121 | |||
122 | int __cpu_disable(void) | ||
123 | { | ||
124 | unsigned int cpu = smp_processor_id(); | ||
125 | struct task_struct *p; | ||
126 | int ret; | ||
127 | |||
128 | ret = mp_ops->cpu_disable(cpu); | ||
129 | if (ret) | ||
130 | return ret; | ||
131 | |||
132 | /* | ||
133 | * Take this CPU offline. Once we clear this, we can't return, | ||
134 | * and we must not schedule until we're ready to give up the cpu. | ||
135 | */ | ||
136 | set_cpu_online(cpu, false); | ||
137 | |||
138 | /* | ||
139 | * OK - migrate IRQs away from this CPU | ||
140 | */ | ||
141 | migrate_irqs(); | ||
142 | |||
143 | /* | ||
144 | * Stop the local timer for this CPU. | ||
145 | */ | ||
146 | local_timer_stop(cpu); | ||
147 | |||
148 | /* | ||
149 | * Flush user cache and TLB mappings, and then remove this CPU | ||
150 | * from the vm mask set of all processes. | ||
151 | */ | ||
152 | flush_cache_all(); | ||
153 | local_flush_tlb_all(); | ||
154 | |||
155 | read_lock(&tasklist_lock); | ||
156 | for_each_process(p) | ||
157 | if (p->mm) | ||
158 | cpumask_clear_cpu(cpu, mm_cpumask(p->mm)); | ||
159 | read_unlock(&tasklist_lock); | ||
160 | |||
161 | return 0; | ||
162 | } | ||
163 | #else /* ... !CONFIG_HOTPLUG_CPU */ | ||
164 | int native_cpu_disable(void) | ||
165 | { | ||
166 | return -ENOSYS; | ||
167 | } | ||
168 | |||
169 | void native_cpu_die(unsigned int cpu) | ||
170 | { | ||
171 | /* We said "no" in __cpu_disable */ | ||
172 | BUG(); | ||
173 | } | ||
174 | |||
175 | void native_play_dead(void) | ||
176 | { | ||
177 | BUG(); | ||
178 | } | ||
179 | #endif | ||
180 | |||
82 | asmlinkage void __cpuinit start_secondary(void) | 181 | asmlinkage void __cpuinit start_secondary(void) |
83 | { | 182 | { |
84 | unsigned int cpu = smp_processor_id(); | 183 | unsigned int cpu = smp_processor_id(); |
@@ -88,8 +187,8 @@ asmlinkage void __cpuinit start_secondary(void) | |||
88 | atomic_inc(&mm->mm_count); | 187 | atomic_inc(&mm->mm_count); |
89 | atomic_inc(&mm->mm_users); | 188 | atomic_inc(&mm->mm_users); |
90 | current->active_mm = mm; | 189 | current->active_mm = mm; |
91 | BUG_ON(current->mm); | ||
92 | enter_lazy_tlb(mm, current); | 190 | enter_lazy_tlb(mm, current); |
191 | local_flush_tlb_all(); | ||
93 | 192 | ||
94 | per_cpu_trap_init(); | 193 | per_cpu_trap_init(); |
95 | 194 | ||
@@ -156,6 +255,7 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
156 | break; | 255 | break; |
157 | 256 | ||
158 | udelay(10); | 257 | udelay(10); |
258 | barrier(); | ||
159 | } | 259 | } |
160 | 260 | ||
161 | if (cpu_online(cpu)) | 261 | if (cpu_online(cpu)) |
@@ -270,7 +370,6 @@ static void flush_tlb_mm_ipi(void *mm) | |||
270 | * behalf of debugees, kswapd stealing pages from another process etc). | 370 | * behalf of debugees, kswapd stealing pages from another process etc). |
271 | * Kanoj 07/00. | 371 | * Kanoj 07/00. |
272 | */ | 372 | */ |
273 | |||
274 | void flush_tlb_mm(struct mm_struct *mm) | 373 | void flush_tlb_mm(struct mm_struct *mm) |
275 | { | 374 | { |
276 | preempt_disable(); | 375 | preempt_disable(); |