Diffstat (limited to 'kernel/cpu.c')
-rw-r--r--  kernel/cpu.c | 165
1 file changed, 92 insertions(+), 73 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 25bba73b1be3..f6e726f18491 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -20,13 +20,29 @@
 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
 static DEFINE_MUTEX(cpu_add_remove_lock);
 
-static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
+/*
+ * The following two API's must be used when attempting
+ * to serialize the updates to cpu_online_mask, cpu_present_mask.
+ */
+void cpu_maps_update_begin(void)
+{
+        mutex_lock(&cpu_add_remove_lock);
+}
+
+void cpu_maps_update_done(void)
+{
+        mutex_unlock(&cpu_add_remove_lock);
+}
+
+static RAW_NOTIFIER_HEAD(cpu_chain);
 
 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
  * Should always be manipulated under cpu_add_remove_lock
  */
 static int cpu_hotplug_disabled;
 
+#ifdef CONFIG_HOTPLUG_CPU
+
 static struct {
         struct task_struct *active_writer;
         struct mutex lock; /* Synchronizes accesses to refcount, */
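
The hunk above moves cpu_maps_update_begin() and cpu_maps_update_done() up in
front of the notifier chain, drops the __cpuinitdata annotation from cpu_chain,
and opens a CONFIG_HOTPLUG_CPU block around the refcounting state. As a minimal
sketch, not part of this patch, a hot-add path that marks a CPU present would
bracket the cpu_present_mask update with these helpers (the function name and
calling context here are hypothetical):

    #include <linux/cpu.h>
    #include <linux/cpumask.h>

    /* Hypothetical hot-add helper: updates to cpu_present_mask must be
     * serialized by the cpu_add_remove_lock these helpers take. */
    static void example_mark_cpu_present(unsigned int cpu)
    {
            cpu_maps_update_begin();
            set_cpu_present(cpu, true);
            cpu_maps_update_done();
    }
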
@@ -41,8 +57,6 @@ static struct {
         .refcount = 0,
 };
 
-#ifdef CONFIG_HOTPLUG_CPU
-
 void get_online_cpus(void)
 {
         might_sleep();
@@ -67,22 +81,6 @@ void put_online_cpus(void)
 }
 EXPORT_SYMBOL_GPL(put_online_cpus);
 
-#endif /* CONFIG_HOTPLUG_CPU */
-
-/*
- * The following two API's must be used when attempting
- * to serialize the updates to cpu_online_mask, cpu_present_mask.
- */
-void cpu_maps_update_begin(void)
-{
-        mutex_lock(&cpu_add_remove_lock);
-}
-
-void cpu_maps_update_done(void)
-{
-        mutex_unlock(&cpu_add_remove_lock);
-}
-
 /*
  * This ensures that the hotplug operation can begin only when the
  * refcount goes to zero.
@@ -124,6 +122,12 @@ static void cpu_hotplug_done(void)
         cpu_hotplug.active_writer = NULL;
         mutex_unlock(&cpu_hotplug.lock);
 }
+
+#else /* #if CONFIG_HOTPLUG_CPU */
+static void cpu_hotplug_begin(void) {}
+static void cpu_hotplug_done(void) {}
+#endif /* #else #if CONFIG_HOTPLUG_CPU */
+
 /* Need to know about CPUs going up/down? */
 int __ref register_cpu_notifier(struct notifier_block *nb)
 {
@@ -134,8 +138,29 @@ int __ref register_cpu_notifier(struct notifier_block *nb)
         return ret;
 }
 
+static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
+                        int *nr_calls)
+{
+        int ret;
+
+        ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
+                                        nr_calls);
+
+        return notifier_to_errno(ret);
+}
+
+static int cpu_notify(unsigned long val, void *v)
+{
+        return __cpu_notify(val, v, -1, NULL);
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 
+static void cpu_notify_nofail(unsigned long val, void *v)
+{
+        BUG_ON(cpu_notify(val, v));
+}
+
 EXPORT_SYMBOL(register_cpu_notifier);
 
 void __ref unregister_cpu_notifier(struct notifier_block *nb)
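
The new __cpu_notify()/cpu_notify() wrappers funnel every chain invocation
through __raw_notifier_call_chain() and then notifier_to_errno(), which maps a
NOTIFY_BAD-style return (as produced by notifier_from_errno()) back to a
negative errno. That is why the later hunks can test plain `if (err)` instead
of `if (err == NOTIFY_BAD)` and no longer have to force `err = -EINVAL`. A
hedged sketch of a callback that vetoes an offline request (the callback name
and the policy it enforces are hypothetical, not part of this patch):

    #include <linux/cpu.h>
    #include <linux/errno.h>
    #include <linux/notifier.h>

    /* Hypothetical policy: refuse to take the boot CPU offline. */
    static int boot_cpu_guard(struct notifier_block *nb,
                              unsigned long action, void *hcpu)
    {
            unsigned int cpu = (unsigned long)hcpu;

            switch (action & ~CPU_TASKS_FROZEN) {
            case CPU_DOWN_PREPARE:
                    if (cpu == 0)
                            /* cpu_notify() reports this as -EBUSY */
                            return notifier_from_errno(-EBUSY);
                    break;
            }
            return NOTIFY_OK;
    }

    static struct notifier_block boot_cpu_guard_nb = {
            .notifier_call = boot_cpu_guard,
    };

register_cpu_notifier(&boot_cpu_guard_nb) would hook this into cpu_chain; on a
veto, _cpu_down() unwinds the already-notified entries with CPU_DOWN_FAILED,
as the hunks below show.
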
@@ -164,6 +189,7 @@ static inline void check_for_tasks(int cpu)
 }
 
 struct take_cpu_down_param {
+        struct task_struct *caller;
         unsigned long mod;
         void *hcpu;
 };
@@ -172,6 +198,7 @@ struct take_cpu_down_param {
 static int __ref take_cpu_down(void *_param)
 {
         struct take_cpu_down_param *param = _param;
+        unsigned int cpu = (unsigned long)param->hcpu;
         int err;
 
         /* Ensure this CPU doesn't handle any more interrupts. */
@@ -179,9 +206,10 @@ static int __ref take_cpu_down(void *_param)
         if (err < 0)
                 return err;
 
-        raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
-                                param->hcpu);
+        cpu_notify(CPU_DYING | param->mod, param->hcpu);
 
+        if (task_cpu(param->caller) == cpu)
+                move_task_off_dead_cpu(cpu, param->caller);
         /* Force idle task to run as soon as we yield: it should
            immediately notice cpu is offline and die quickly. */
         sched_idle_next();
@@ -192,10 +220,10 @@ static int __ref take_cpu_down(void *_param)
 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 {
         int err, nr_calls = 0;
-        cpumask_var_t old_allowed;
         void *hcpu = (void *)(long)cpu;
         unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
         struct take_cpu_down_param tcd_param = {
+                .caller = current,
                 .mod = mod,
                 .hcpu = hcpu,
         };
@@ -206,38 +234,22 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
         if (!cpu_online(cpu))
                 return -EINVAL;
 
-        if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
-                return -ENOMEM;
-
         cpu_hotplug_begin();
-        set_cpu_active(cpu, false);
-        err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
-                                        hcpu, -1, &nr_calls);
-        if (err == NOTIFY_BAD) {
-                set_cpu_active(cpu, true);
-
+        err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
+        if (err) {
                 nr_calls--;
-                __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
-                                          hcpu, nr_calls, NULL);
+                __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
                 printk("%s: attempt to take down CPU %u failed\n",
                         __func__, cpu);
-                err = -EINVAL;
                 goto out_release;
         }
 
-        /* Ensure that we are not runnable on dying cpu */
-        cpumask_copy(old_allowed, &current->cpus_allowed);
-        set_cpus_allowed_ptr(current, cpu_active_mask);
-
         err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
         if (err) {
-                set_cpu_active(cpu, true);
                 /* CPU didn't die: tell everyone.  Can't complain. */
-                if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
-                                            hcpu) == NOTIFY_BAD)
-                        BUG();
+                cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
 
-                goto out_allowed;
+                goto out_release;
         }
         BUG_ON(cpu_online(cpu));
 
@@ -249,22 +261,14 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
         __cpu_die(cpu);
 
         /* CPU is completely dead: tell everyone.  Too late to complain. */
-        if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod,
-                                    hcpu) == NOTIFY_BAD)
-                BUG();
+        cpu_notify_nofail(CPU_DEAD | mod, hcpu);
 
         check_for_tasks(cpu);
 
-out_allowed:
-        set_cpus_allowed_ptr(current, old_allowed);
 out_release:
         cpu_hotplug_done();
-        if (!err) {
-                if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod,
-                                            hcpu) == NOTIFY_BAD)
-                        BUG();
-        }
-        free_cpumask_var(old_allowed);
+        if (!err)
+                cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
         return err;
 }
 
@@ -272,9 +276,6 @@ int __ref cpu_down(unsigned int cpu)
 {
         int err;
 
-        err = stop_machine_create();
-        if (err)
-                return err;
         cpu_maps_update_begin();
 
         if (cpu_hotplug_disabled) {
@@ -286,7 +287,6 @@ int __ref cpu_down(unsigned int cpu)
 
 out:
         cpu_maps_update_done();
-        stop_machine_destroy();
         return err;
 }
 EXPORT_SYMBOL(cpu_down);
@@ -303,13 +303,11 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
                 return -EINVAL;
 
         cpu_hotplug_begin();
-        ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
-                                        -1, &nr_calls);
-        if (ret == NOTIFY_BAD) {
+        ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
+        if (ret) {
                 nr_calls--;
                 printk("%s: attempt to bring up CPU %u failed\n",
                         __func__, cpu);
-                ret = -EINVAL;
                 goto out_notify;
         }
 
@@ -319,15 +317,12 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
                 goto out_notify;
         BUG_ON(!cpu_online(cpu));
 
-        set_cpu_active(cpu, true);
-
         /* Now call notifier in preparation. */
-        raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);
+        cpu_notify(CPU_ONLINE | mod, hcpu);
 
 out_notify:
         if (ret != 0)
-                __raw_notifier_call_chain(&cpu_chain,
-                                CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
+                __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
         cpu_hotplug_done();
 
         return ret;
@@ -336,6 +331,12 @@ out_notify:
 int __cpuinit cpu_up(unsigned int cpu)
 {
         int err = 0;
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+        int nid;
+        pg_data_t *pgdat;
+#endif
+
         if (!cpu_possible(cpu)) {
                 printk(KERN_ERR "can't online cpu %d because it is not "
                         "configured as may-hotadd at boot time\n", cpu);
@@ -346,6 +347,28 @@ int __cpuinit cpu_up(unsigned int cpu)
                 return -EINVAL;
         }
 
+#ifdef CONFIG_MEMORY_HOTPLUG
+        nid = cpu_to_node(cpu);
+        if (!node_online(nid)) {
+                err = mem_online_node(nid);
+                if (err)
+                        return err;
+        }
+
+        pgdat = NODE_DATA(nid);
+        if (!pgdat) {
+                printk(KERN_ERR
+                        "Can't online cpu %d due to NULL pgdat\n", cpu);
+                return -ENOMEM;
+        }
+
+        if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
+                mutex_lock(&zonelists_mutex);
+                build_all_zonelists(NULL);
+                mutex_unlock(&zonelists_mutex);
+        }
+#endif
+
         cpu_maps_update_begin();
 
         if (cpu_hotplug_disabled) {
@@ -365,11 +388,8 @@ static cpumask_var_t frozen_cpus;
 
 int disable_nonboot_cpus(void)
 {
-        int cpu, first_cpu, error;
+        int cpu, first_cpu, error = 0;
 
-        error = stop_machine_create();
-        if (error)
-                return error;
         cpu_maps_update_begin();
         first_cpu = cpumask_first(cpu_online_mask);
         /*
@@ -400,7 +420,6 @@ int disable_nonboot_cpus(void)
                 printk(KERN_ERR "Non-boot CPUs are not disabled\n");
         }
         cpu_maps_update_done();
-        stop_machine_destroy();
         return error;
 }
 
@@ -467,7 +486,7 @@ void __cpuinit notify_cpu_starting(unsigned int cpu)
         if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
                 val = CPU_STARTING_FROZEN;
 #endif /* CONFIG_PM_SLEEP_SMP */
-        raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
+        cpu_notify(val, (void *)(long)cpu);
 }
 
 #endif /* CONFIG_SMP */
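
Taken together, the cpu_down()/cpu_up() paths now report callback vetoes as
real errnos rather than the opaque -EINVAL the old NOTIFY_BAD handling forced.
A minimal, hypothetical smoke test of the round trip, built into the kernel
rather than a module and with the CPU number chosen arbitrarily:

    #include <linux/cpu.h>
    #include <linux/init.h>

    /* Hypothetical built-in test: offline CPU 1, then bring it back.
     * Both calls return 0 on success or a negative errno, including
     * any veto a CPU_DOWN_PREPARE callback raised via
     * notifier_from_errno(). */
    static int __init hotplug_roundtrip_init(void)
    {
            int err = cpu_down(1);

            if (!err)
                    err = cpu_up(1);
            return err;
    }
    late_initcall(hotplug_roundtrip_init);
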