Diffstat (limited to 'kernel/cpu.c')
 kernel/cpu.c | 158 ++++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 93 insertions(+), 65 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 25bba73b1be3..63e8de13c948 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -20,6 +20,20 @@
 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
 static DEFINE_MUTEX(cpu_add_remove_lock);
 
+/*
+ * The following two API's must be used when attempting
+ * to serialize the updates to cpu_online_mask, cpu_present_mask.
+ */
+void cpu_maps_update_begin(void)
+{
+        mutex_lock(&cpu_add_remove_lock);
+}
+
+void cpu_maps_update_done(void)
+{
+        mutex_unlock(&cpu_add_remove_lock);
+}
+
 static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
 
 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
@@ -27,6 +41,8 @@ static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
  */
 static int cpu_hotplug_disabled;
 
+#ifdef CONFIG_HOTPLUG_CPU
+
 static struct {
         struct task_struct *active_writer;
         struct mutex lock; /* Synchronizes accesses to refcount, */
@@ -41,8 +57,6 @@ static struct {
         .refcount = 0,
 };
 
-#ifdef CONFIG_HOTPLUG_CPU
-
 void get_online_cpus(void)
 {
         might_sleep();
@@ -67,22 +81,6 @@ void put_online_cpus(void)
 }
 EXPORT_SYMBOL_GPL(put_online_cpus);
 
-#endif /* CONFIG_HOTPLUG_CPU */
-
-/*
- * The following two API's must be used when attempting
- * to serialize the updates to cpu_online_mask, cpu_present_mask.
- */
-void cpu_maps_update_begin(void)
-{
-        mutex_lock(&cpu_add_remove_lock);
-}
-
-void cpu_maps_update_done(void)
-{
-        mutex_unlock(&cpu_add_remove_lock);
-}
-
 /*
  * This ensures that the hotplug operation can begin only when the
  * refcount goes to zero.
@@ -124,6 +122,12 @@ static void cpu_hotplug_done(void)
         cpu_hotplug.active_writer = NULL;
         mutex_unlock(&cpu_hotplug.lock);
 }
+
+#else /* #if CONFIG_HOTPLUG_CPU */
+static void cpu_hotplug_begin(void) {}
+static void cpu_hotplug_done(void) {}
+#endif /* #else #if CONFIG_HOTPLUG_CPU */
+
 /* Need to know about CPUs going up/down? */
 int __ref register_cpu_notifier(struct notifier_block *nb)
 {
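The empty cpu_hotplug_begin()/cpu_hotplug_done() stubs added above are sufficient when CONFIG_HOTPLUG_CPU=n because there are no readers to exclude. For context, a minimal sketch of the reader side they pair with (the example_ function is invented for illustration; the APIs are the real ones from this file):

static void example_walk_online_cpus(void)      /* hypothetical caller */
{
        unsigned int cpu;

        get_online_cpus();      /* bumps cpu_hotplug.refcount; a writer in
                                 * cpu_hotplug_begin() sleeps until it drops
                                 * back to zero */
        for_each_online_cpu(cpu)
                pr_info("cpu %u is online\n", cpu);
        put_online_cpus();      /* wakes the waiting writer, if any */
}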
@@ -134,6 +138,30 @@ int __ref register_cpu_notifier(struct notifier_block *nb)
         return ret;
 }
 
+static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
+                        int *nr_calls)
+{
+        int ret;
+
+        ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
+                                        nr_calls);
+
+        return notifier_to_errno(ret);
+}
+
+static int cpu_notify(unsigned long val, void *v)
+{
+        return __cpu_notify(val, v, -1, NULL);
+}
+
+static void cpu_notify_nofail(unsigned long val, void *v)
+{
+        int err;
+
+        err = cpu_notify(val, v);
+        BUG_ON(err);
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 
 EXPORT_SYMBOL(register_cpu_notifier);
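The three helpers introduced above centralize notifier-chain error handling: __cpu_notify() funnels the raw NOTIFY_* result through notifier_to_errno(), so every caller can test a plain errno instead of comparing against NOTIFY_BAD. A hedged sketch of the other side of that contract (the example_* names are invented; the notifier calls are real):

static int example_cpu_callback(struct notifier_block *nb,
                                unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
                /* notifier_from_errno() is what lets the errno survive
                 * the trip back through __cpu_notify() */
                if (example_alloc_percpu(cpu))  /* hypothetical; nonzero on failure */
                        return notifier_from_errno(-ENOMEM);
                break;
        case CPU_UP_CANCELED:
        case CPU_DEAD:
                example_free_percpu(cpu);       /* hypothetical */
                break;
        }
        return NOTIFY_OK;
}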
@@ -164,6 +192,7 @@ static inline void check_for_tasks(int cpu)
 }
 
 struct take_cpu_down_param {
+        struct task_struct *caller;
         unsigned long mod;
         void *hcpu;
 };
@@ -172,6 +201,7 @@ struct take_cpu_down_param {
 static int __ref take_cpu_down(void *_param)
 {
         struct take_cpu_down_param *param = _param;
+        unsigned int cpu = (unsigned long)param->hcpu;
         int err;
 
         /* Ensure this CPU doesn't handle any more interrupts. */
@@ -179,9 +209,10 @@ static int __ref take_cpu_down(void *_param)
         if (err < 0)
                 return err;
 
-        raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
-                                param->hcpu);
+        cpu_notify(CPU_DYING | param->mod, param->hcpu);
 
+        if (task_cpu(param->caller) == cpu)
+                move_task_off_dead_cpu(cpu, param->caller);
         /* Force idle task to run as soon as we yield: it should
            immediately notice cpu is offline and die quickly. */
         sched_idle_next();
@@ -192,10 +223,10 @@ static int __ref take_cpu_down(void *_param)
 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 {
         int err, nr_calls = 0;
-        cpumask_var_t old_allowed;
         void *hcpu = (void *)(long)cpu;
         unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
         struct take_cpu_down_param tcd_param = {
+                .caller = current,
                 .mod = mod,
                 .hcpu = hcpu,
         };
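Both take_cpu_down() above and _cpu_down() lean on the long-standing hcpu convention: the CPU number rides through the notifier chain's void * argument by way of a cast through long. A standalone round-trip demo, runnable in user space (illustration only):

#include <assert.h>

int main(void)
{
        unsigned int cpu = 3;
        void *hcpu = (void *)(long)cpu;          /* encode, as in _cpu_down() */
        unsigned int back = (unsigned long)hcpu; /* decode, as in take_cpu_down() */

        assert(back == cpu);
        return 0;
}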
@@ -206,38 +237,26 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
         if (!cpu_online(cpu))
                 return -EINVAL;
 
-        if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
-                return -ENOMEM;
-
         cpu_hotplug_begin();
         set_cpu_active(cpu, false);
-        err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
-                                        hcpu, -1, &nr_calls);
-        if (err == NOTIFY_BAD) {
+        err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
+        if (err) {
                 set_cpu_active(cpu, true);
 
                 nr_calls--;
-                __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
-                                          hcpu, nr_calls, NULL);
+                __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
                 printk("%s: attempt to take down CPU %u failed\n",
                         __func__, cpu);
-                err = -EINVAL;
                 goto out_release;
         }
 
-        /* Ensure that we are not runnable on dying cpu */
-        cpumask_copy(old_allowed, &current->cpus_allowed);
-        set_cpus_allowed_ptr(current, cpu_active_mask);
-
         err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
         if (err) {
                 set_cpu_active(cpu, true);
                 /* CPU didn't die: tell everyone.  Can't complain. */
-                if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
-                                            hcpu) == NOTIFY_BAD)
-                        BUG();
+                cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
 
-                goto out_allowed;
+                goto out_release;
         }
         BUG_ON(cpu_online(cpu));
 
@@ -249,22 +268,14 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
         __cpu_die(cpu);
 
         /* CPU is completely dead: tell everyone.  Too late to complain. */
-        if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod,
-                                    hcpu) == NOTIFY_BAD)
-                BUG();
+        cpu_notify_nofail(CPU_DEAD | mod, hcpu);
 
         check_for_tasks(cpu);
 
-out_allowed:
-        set_cpus_allowed_ptr(current, old_allowed);
 out_release:
         cpu_hotplug_done();
-        if (!err) {
-                if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod,
-                                            hcpu) == NOTIFY_BAD)
-                        BUG();
-        }
-        free_cpumask_var(old_allowed);
+        if (!err)
+                cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
         return err;
 }
 
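The rewritten error path above preserves the nr_calls bookkeeping: __cpu_notify() counts how many callbacks actually ran, nr_calls is decremented to skip the one that vetoed, and CPU_DOWN_FAILED is then replayed to exactly the callbacks that had already seen CPU_DOWN_PREPARE. A user-space analog of that pattern (an illustration, not kernel code):

#include <stdio.h>

enum { DOWN_PREPARE, DOWN_FAILED };

static int handler(int id, int event)
{
        if (event == DOWN_PREPARE && id == 2)
                return -1;                      /* handler 2 vetoes */
        printf("handler %d got event %d\n", id, event);
        return 0;
}

int main(void)
{
        int nr_calls = 0, err = 0, i;

        for (i = 0; i < 4; i++) {               /* __cpu_notify(..., -1, &nr_calls) */
                nr_calls++;
                err = handler(i, DOWN_PREPARE);
                if (err)
                        break;
        }
        if (err) {
                nr_calls--;                     /* skip the handler that vetoed */
                for (i = 0; i < nr_calls; i++)  /* __cpu_notify(..., nr_calls, NULL) */
                        handler(i, DOWN_FAILED);
        }
        return 0;
}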
@@ -272,9 +283,6 @@ int __ref cpu_down(unsigned int cpu)
 {
         int err;
 
-        err = stop_machine_create();
-        if (err)
-                return err;
         cpu_maps_update_begin();
 
         if (cpu_hotplug_disabled) {
@@ -286,7 +294,6 @@ int __ref cpu_down(unsigned int cpu)
 
 out:
         cpu_maps_update_done();
-        stop_machine_destroy();
         return err;
 }
 EXPORT_SYMBOL(cpu_down);
@@ -303,13 +310,11 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
                 return -EINVAL;
 
         cpu_hotplug_begin();
-        ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
-                                        -1, &nr_calls);
-        if (ret == NOTIFY_BAD) {
+        ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
+        if (ret) {
                 nr_calls--;
                 printk("%s: attempt to bring up CPU %u failed\n",
                                 __func__, cpu);
-                ret = -EINVAL;
                 goto out_notify;
         }
 
@@ -322,12 +327,11 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
         set_cpu_active(cpu, true);
 
         /* Now call notifier in preparation. */
-        raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);
+        cpu_notify(CPU_ONLINE | mod, hcpu);
 
 out_notify:
         if (ret != 0)
-                __raw_notifier_call_chain(&cpu_chain,
-                                CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
+                __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
         cpu_hotplug_done();
 
         return ret;
@@ -336,6 +340,12 @@ out_notify:
 int __cpuinit cpu_up(unsigned int cpu)
 {
         int err = 0;
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+        int nid;
+        pg_data_t *pgdat;
+#endif
+
         if (!cpu_possible(cpu)) {
                 printk(KERN_ERR "can't online cpu %d because it is not "
                         "configured as may-hotadd at boot time\n", cpu);
@@ -346,6 +356,28 @@ int __cpuinit cpu_up(unsigned int cpu)
                 return -EINVAL;
         }
 
+#ifdef CONFIG_MEMORY_HOTPLUG
+        nid = cpu_to_node(cpu);
+        if (!node_online(nid)) {
+                err = mem_online_node(nid);
+                if (err)
+                        return err;
+        }
+
+        pgdat = NODE_DATA(nid);
+        if (!pgdat) {
+                printk(KERN_ERR
+                        "Can't online cpu %d due to NULL pgdat\n", cpu);
+                return -ENOMEM;
+        }
+
+        if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
+                mutex_lock(&zonelists_mutex);
+                build_all_zonelists(NULL);
+                mutex_unlock(&zonelists_mutex);
+        }
+#endif
+
         cpu_maps_update_begin();
 
         if (cpu_hotplug_disabled) {
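The new CONFIG_MEMORY_HOTPLUG block lets cpu_up() succeed on a node whose memory is still offline: online the node, verify its pgdat exists, and lazily build zonelists under zonelists_mutex. A user-space analog of that last lazy-init-under-a-mutex step (illustration only; init_table() stands in for build_all_zonelists(NULL)):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_mutex = PTHREAD_MUTEX_INITIALIZER;
static int table_built;         /* 0 plays the role of _zonerefs->zone == NULL */

static void init_table(void)    /* hypothetical; harmless if run twice */
{
        table_built = 1;
}

static void ensure_table(void)
{
        if (!table_built) {             /* cheap unlocked test, as in cpu_up() */
                pthread_mutex_lock(&table_mutex);
                init_table();
                pthread_mutex_unlock(&table_mutex);
        }
}

int main(void)
{
        ensure_table();
        printf("table built: %d\n", table_built);
        return 0;
}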
@@ -367,9 +399,6 @@ int disable_nonboot_cpus(void)
 {
         int cpu, first_cpu, error;
 
-        error = stop_machine_create();
-        if (error)
-                return error;
         cpu_maps_update_begin();
         first_cpu = cpumask_first(cpu_online_mask);
         /*
@@ -400,7 +429,6 @@ int disable_nonboot_cpus(void)
                 printk(KERN_ERR "Non-boot CPUs are not disabled\n");
         }
         cpu_maps_update_done();
-        stop_machine_destroy();
         return error;
 }
 
@@ -467,7 +495,7 @@ void __cpuinit notify_cpu_starting(unsigned int cpu)
         if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
                 val = CPU_STARTING_FROZEN;
 #endif /* CONFIG_PM_SLEEP_SMP */
-        raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
+        cpu_notify(val, (void *)(long)cpu);
 }
 
 #endif /* CONFIG_SMP */
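After this change, cpu_up() and cpu_down() remain the public entry points for onlining and offlining a single CPU, with cpu_notify() and friends handling all chain traffic internally. A hedged sketch of a caller (example_set_online() is invented; the two functions it wraps are the real exported ones, roughly what a sysfs "online" store handler boils down to):

static int example_set_online(unsigned int cpu, bool online)    /* hypothetical */
{
        return online ? cpu_up(cpu) : cpu_down(cpu);
}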