author     Paul Mundt <lethal@linux-sh.org>    2010-04-26 06:08:55 -0400
committer  Paul Mundt <lethal@linux-sh.org>    2010-04-26 06:08:55 -0400
commit     763142d1efb56effe614d71185781796c4b83c78 (patch)
tree       f886c239786fd4be028e3a45006c5cc5c1b3a3f2 /arch/sh/kernel/smp.c
parent     8db2bc4559639680a94d4492ae4b7ce71298a74f (diff)
sh: CPU hotplug support.
This adds preliminary support for CPU hotplug for SH SMP systems.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
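
The generic code in this patch reaches the platform through mp_ops (note the
mp_ops->cpu_disable() call in __cpu_disable() below). As a rough illustration
of how the new callbacks fit together -- the struct layout and the cpu_die/
play_dead field names are assumptions about the SH plat_smp_ops interface of
this era, not contents of this patch -- a platform might wire them up like so:

    /*
     * Illustrative sketch only: publishing the hotplug callbacks
     * through plat_smp_ops.  Only cpu_disable is confirmed by the
     * patch below; the other field names are assumed.
     */
    #include <asm/smp-ops.h>

    struct plat_smp_ops example_smp_ops = {
            /* boot-time hooks (prepare_cpus, start_cpu, ...) elided */
            .cpu_disable    = native_cpu_disable,   /* may veto the offline request */
            .cpu_die        = native_cpu_die,       /* waiter side: poll for CPU_DEAD */
            .play_dead      = native_play_dead,     /* runs on the dying CPU itself */
    };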
Diffstat (limited to 'arch/sh/kernel/smp.c')
-rw-r--r--  arch/sh/kernel/smp.c  103
1 file changed, 101 insertions(+), 2 deletions(-)
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 21e7f8a9f3e4..86cd6f94b53b 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -79,6 +79,105 @@ void __init smp_prepare_boot_cpu(void)
         per_cpu(cpu_state, cpu) = CPU_ONLINE;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+void native_cpu_die(unsigned int cpu)
+{
+        unsigned int i;
+
+        for (i = 0; i < 10; i++) {
+                smp_rmb();
+                if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+                        if (system_state == SYSTEM_RUNNING)
+                                pr_info("CPU %u is now offline\n", cpu);
+
+                        return;
+                }
+
+                msleep(100);
+        }
+
+        pr_err("CPU %u didn't die...\n", cpu);
+}
+
+int native_cpu_disable(unsigned int cpu)
+{
+        return cpu == 0 ? -EPERM : 0;
+}
+
+void play_dead_common(void)
+{
+        idle_task_exit();
+        irq_ctx_exit(raw_smp_processor_id());
+        mb();
+
+        __get_cpu_var(cpu_state) = CPU_DEAD;
+        local_irq_disable();
+}
+
+void native_play_dead(void)
+{
+        play_dead_common();
+}
+
+int __cpu_disable(void)
+{
+        unsigned int cpu = smp_processor_id();
+        struct task_struct *p;
+        int ret;
+
+        ret = mp_ops->cpu_disable(cpu);
+        if (ret)
+                return ret;
+
+        /*
+         * Take this CPU offline. Once we clear this, we can't return,
+         * and we must not schedule until we're ready to give up the cpu.
+         */
+        set_cpu_online(cpu, false);
+
+        /*
+         * OK - migrate IRQs away from this CPU
+         */
+        migrate_irqs();
+
+        /*
+         * Stop the local timer for this CPU.
+         */
+        local_timer_stop(cpu);
+
+        /*
+         * Flush user cache and TLB mappings, and then remove this CPU
+         * from the vm mask set of all processes.
+         */
+        flush_cache_all();
+        local_flush_tlb_all();
+
+        read_lock(&tasklist_lock);
+        for_each_process(p)
+                if (p->mm)
+                        cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
+        read_unlock(&tasklist_lock);
+
+        return 0;
+}
+#else /* ... !CONFIG_HOTPLUG_CPU */
+int native_cpu_disable(void)
+{
+        return -ENOSYS;
+}
+
+void native_cpu_die(unsigned int cpu)
+{
+        /* We said "no" in __cpu_disable */
+        BUG();
+}
+
+void native_play_dead(void)
+{
+        BUG();
+}
+#endif
+
 asmlinkage void __cpuinit start_secondary(void)
 {
         unsigned int cpu = smp_processor_id();
@@ -88,8 +187,8 @@ asmlinkage void __cpuinit start_secondary(void)
         atomic_inc(&mm->mm_count);
         atomic_inc(&mm->mm_users);
         current->active_mm = mm;
-        BUG_ON(current->mm);
         enter_lazy_tlb(mm, current);
+        local_flush_tlb_all();
 
         per_cpu_trap_init();
 
@@ -156,6 +255,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
                         break;
 
                 udelay(10);
+                barrier();
         }
 
         if (cpu_online(cpu))
@@ -270,7 +370,6 @@ static void flush_tlb_mm_ipi(void *mm)
  * behalf of debugees, kswapd stealing pages from another process etc).
  * Kanoj 07/00.
  */
-
 void flush_tlb_mm(struct mm_struct *mm)
 {
         preempt_disable();
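
With CONFIG_HOTPLUG_CPU=y, the path added above is driven from userspace
through the standard sysfs online attribute. A minimal test sketch (the
cpu1 path is the stock sysfs location; on a platform wiring cpu_die to
native_cpu_die, offlining should print "CPU 1 is now offline"):

    /* Offline CPU 1, then bring it back.  Writing "0" funnels into
     * __cpu_disable() and the play_dead path; "1" re-enters __cpu_up(). */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    static int set_online(const char *path, const char *val)
    {
            int fd = open(path, O_WRONLY);  /* reopen per write, like echo */
            if (fd < 0) {
                    perror(path);
                    return -1;
            }
            if (write(fd, val, 1) != 1) {
                    perror("write");
                    close(fd);
                    return -1;
            }
            close(fd);
            return 0;
    }

    int main(void)
    {
            const char *cpu1 = "/sys/devices/system/cpu/cpu1/online";

            if (set_online(cpu1, "0"))      /* take CPU 1 down */
                    return 1;
            sleep(1);
            if (set_online(cpu1, "1"))      /* bring CPU 1 back */
                    return 1;
            return 0;
    }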