path: root/kernel/cpu.c
author    Robert Richter <robert.richter@amd.com>  2010-06-04 05:33:10 -0400
committer Robert Richter <robert.richter@amd.com>  2010-06-04 05:33:10 -0400
commit    d8a382d2662822248a97ce9d670b90e68aefbd3a (patch)
tree      4f5bbd5d0a5881ed42de611402ea4ac2c6d6ff48 /kernel/cpu.c
parent    45c34e05c4e3d36e7c44e790241ea11a1d90d54e (diff)
parent    c6df8d5ab87a246942d138321e1721edbb69f6e1 (diff)
Merge remote branch 'tip/perf/urgent' into oprofile/urgent
Diffstat (limited to 'kernel/cpu.c')
-rw-r--r--  kernel/cpu.c | 157
1 file changed, 91 insertions(+), 66 deletions(-)
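The kernel/cpu.c side of this merge consolidates the open-coded notifier-chain calls behind cpu_notify()-style wrappers, compiles the hotplug begin/done helpers away when CONFIG_HOTPLUG_CPU is off, moves the caller off the dying CPU inside take_cpu_down(), drops the stop_machine_create()/stop_machine_destroy() bracketing, and teaches cpu_up() to online the memory node behind a hot-added CPU. A minimal sketch of the call-site change the wrappers enable (illustrative, not lines from the patch):

/* Before: every caller decoded NOTIFY_* return values itself. */
if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod, hcpu) == NOTIFY_BAD)
        BUG();

/* After: the wrapper hides the chain and the errno conversion. */
cpu_notify_nofail(CPU_DEAD | mod, hcpu);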
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 25bba73b1be3..8b92539b4754 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -20,6 +20,20 @@
 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
 static DEFINE_MUTEX(cpu_add_remove_lock);
 
+/*
+ * The following two APIs must be used when attempting
+ * to serialize the updates to cpu_online_mask, cpu_present_mask.
+ */
+void cpu_maps_update_begin(void)
+{
+        mutex_lock(&cpu_add_remove_lock);
+}
+
+void cpu_maps_update_done(void)
+{
+        mutex_unlock(&cpu_add_remove_lock);
+}
+
 static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
 
 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
@@ -27,6 +41,8 @@ static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
  */
 static int cpu_hotplug_disabled;
 
+#ifdef CONFIG_HOTPLUG_CPU
+
 static struct {
         struct task_struct *active_writer;
         struct mutex lock; /* Synchronizes accesses to refcount, */
@@ -41,8 +57,6 @@ static struct {
         .refcount = 0,
 };
 
-#ifdef CONFIG_HOTPLUG_CPU
-
 void get_online_cpus(void)
 {
         might_sleep();
@@ -67,22 +81,6 @@ void put_online_cpus(void)
 }
 EXPORT_SYMBOL_GPL(put_online_cpus);
 
-#endif /* CONFIG_HOTPLUG_CPU */
-
-/*
- * The following two APIs must be used when attempting
- * to serialize the updates to cpu_online_mask, cpu_present_mask.
- */
-void cpu_maps_update_begin(void)
-{
-        mutex_lock(&cpu_add_remove_lock);
-}
-
-void cpu_maps_update_done(void)
-{
-        mutex_unlock(&cpu_add_remove_lock);
-}
-
 /*
  * This ensures that the hotplug operation can begin only when the
  * refcount goes to zero.
@@ -124,6 +122,12 @@ static void cpu_hotplug_done(void)
         cpu_hotplug.active_writer = NULL;
         mutex_unlock(&cpu_hotplug.lock);
 }
+
+#else /* #if CONFIG_HOTPLUG_CPU */
+static void cpu_hotplug_begin(void) {}
+static void cpu_hotplug_done(void) {}
+#endif /* #else #if CONFIG_HOTPLUG_CPU */
+
 /* Need to know about CPUs going up/down? */
 int __ref register_cpu_notifier(struct notifier_block *nb)
 {
@@ -134,8 +138,29 @@ int __ref register_cpu_notifier(struct notifier_block *nb)
         return ret;
 }
 
+static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
+                        int *nr_calls)
+{
+        int ret;
+
+        ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
+                                        nr_calls);
+
+        return notifier_to_errno(ret);
+}
+
+static int cpu_notify(unsigned long val, void *v)
+{
+        return __cpu_notify(val, v, -1, NULL);
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 
+static void cpu_notify_nofail(unsigned long val, void *v)
+{
+        BUG_ON(cpu_notify(val, v));
+}
+
 EXPORT_SYMBOL(register_cpu_notifier);
 
 void __ref unregister_cpu_notifier(struct notifier_block *nb)
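The nr_to_call/nr_calls pair in __cpu_notify() exists so that a failed CPU_DOWN_PREPARE or CPU_UP_PREPARE can be unwound by re-notifying only the callbacks that already ran. A sketch of that rollback idiom, condensed from the _cpu_down() hunk further below:

int nr_calls = 0;

err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
if (err) {
        /* Skip the callback that failed; it cleaned up after itself. */
        nr_calls--;
        __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
}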
@@ -164,6 +189,7 @@ static inline void check_for_tasks(int cpu)
 }
 
 struct take_cpu_down_param {
+        struct task_struct *caller;
         unsigned long mod;
         void *hcpu;
 };
@@ -172,6 +198,7 @@ struct take_cpu_down_param {
 static int __ref take_cpu_down(void *_param)
 {
         struct take_cpu_down_param *param = _param;
+        unsigned int cpu = (unsigned long)param->hcpu;
         int err;
 
         /* Ensure this CPU doesn't handle any more interrupts. */
@@ -179,9 +206,10 @@ static int __ref take_cpu_down(void *_param)
         if (err < 0)
                 return err;
 
-        raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
-                                param->hcpu);
+        cpu_notify(CPU_DYING | param->mod, param->hcpu);
 
+        if (task_cpu(param->caller) == cpu)
+                move_task_off_dead_cpu(cpu, param->caller);
         /* Force idle task to run as soon as we yield: it should
            immediately notice cpu is offline and die quickly. */
         sched_idle_next();
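take_cpu_down() runs on the dying CPU under stop_machine, so it can now migrate the caller away itself via move_task_off_dead_cpu(). That replaces the pattern _cpu_down() used to perform around __stop_machine(), reconstructed here from the lines the patch deletes:

/* Old approach, removed by this patch: temporarily rewrite the
 * caller's allowed mask so it cannot run on the dying CPU. */
cpumask_copy(old_allowed, &current->cpus_allowed);      /* save     */
set_cpus_allowed_ptr(current, cpu_active_mask);         /* restrict */
/* ... __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)) ... */
set_cpus_allowed_ptr(current, old_allowed);             /* restore  */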
@@ -192,10 +220,10 @@ static int __ref take_cpu_down(void *_param)
 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 {
         int err, nr_calls = 0;
-        cpumask_var_t old_allowed;
         void *hcpu = (void *)(long)cpu;
         unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
         struct take_cpu_down_param tcd_param = {
+                .caller = current,
                 .mod = mod,
                 .hcpu = hcpu,
         };
@@ -206,38 +234,26 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
         if (!cpu_online(cpu))
                 return -EINVAL;
 
-        if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
-                return -ENOMEM;
-
         cpu_hotplug_begin();
         set_cpu_active(cpu, false);
-        err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
-                                        hcpu, -1, &nr_calls);
-        if (err == NOTIFY_BAD) {
+        err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
+        if (err) {
                 set_cpu_active(cpu, true);
 
                 nr_calls--;
-                __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
-                                          hcpu, nr_calls, NULL);
+                __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
                 printk("%s: attempt to take down CPU %u failed\n",
                        __func__, cpu);
-                err = -EINVAL;
                 goto out_release;
         }
 
-        /* Ensure that we are not runnable on dying cpu */
-        cpumask_copy(old_allowed, &current->cpus_allowed);
-        set_cpus_allowed_ptr(current, cpu_active_mask);
-
         err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
         if (err) {
                 set_cpu_active(cpu, true);
                 /* CPU didn't die: tell everyone.  Can't complain. */
-                if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
-                                            hcpu) == NOTIFY_BAD)
-                        BUG();
+                cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
 
-                goto out_allowed;
+                goto out_release;
         }
         BUG_ON(cpu_online(cpu));
 
@@ -249,22 +265,14 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
         __cpu_die(cpu);
 
         /* CPU is completely dead: tell everyone.  Too late to complain. */
-        if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod,
-                                    hcpu) == NOTIFY_BAD)
-                BUG();
+        cpu_notify_nofail(CPU_DEAD | mod, hcpu);
 
         check_for_tasks(cpu);
 
-out_allowed:
-        set_cpus_allowed_ptr(current, old_allowed);
 out_release:
         cpu_hotplug_done();
-        if (!err) {
-                if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod,
-                                            hcpu) == NOTIFY_BAD)
-                        BUG();
-        }
-        free_cpumask_var(old_allowed);
+        if (!err)
+                cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
         return err;
 }
 
@@ -272,9 +280,6 @@ int __ref cpu_down(unsigned int cpu)
 {
         int err;
 
-        err = stop_machine_create();
-        if (err)
-                return err;
         cpu_maps_update_begin();
 
         if (cpu_hotplug_disabled) {
@@ -286,7 +291,6 @@ int __ref cpu_down(unsigned int cpu)
 
 out:
         cpu_maps_update_done();
-        stop_machine_destroy();
         return err;
 }
 EXPORT_SYMBOL(cpu_down);
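The stop_machine_create()/stop_machine_destroy() calls can go because stop_machine was reworked in this cycle to ride on the always-present per-CPU cpu_stop threads; that rework lands outside this file. Assuming that, cpu_down() reduces to roughly the following (the elided middle of the function is reconstructed, not shown by the diff):

int __ref cpu_down(unsigned int cpu)
{
        int err;

        cpu_maps_update_begin();
        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }
        err = _cpu_down(cpu, 0);
out:
        cpu_maps_update_done();
        return err;
}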
@@ -303,13 +307,11 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
                 return -EINVAL;
 
         cpu_hotplug_begin();
-        ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
-                                        -1, &nr_calls);
-        if (ret == NOTIFY_BAD) {
+        ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
+        if (ret) {
                 nr_calls--;
                 printk("%s: attempt to bring up CPU %u failed\n",
                        __func__, cpu);
-                ret = -EINVAL;
                 goto out_notify;
         }
 
@@ -322,12 +324,11 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
         set_cpu_active(cpu, true);
 
         /* Now call notifier in preparation. */
-        raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);
+        cpu_notify(CPU_ONLINE | mod, hcpu);
 
 out_notify:
         if (ret != 0)
-                __raw_notifier_call_chain(&cpu_chain,
-                                CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
+                __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
         cpu_hotplug_done();
 
         return ret;
@@ -336,6 +337,12 @@ out_notify:
 int __cpuinit cpu_up(unsigned int cpu)
 {
         int err = 0;
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+        int nid;
+        pg_data_t *pgdat;
+#endif
+
         if (!cpu_possible(cpu)) {
                 printk(KERN_ERR "can't online cpu %d because it is not "
                         "configured as may-hotadd at boot time\n", cpu);
@@ -346,6 +353,28 @@ int __cpuinit cpu_up(unsigned int cpu)
                 return -EINVAL;
         }
 
+#ifdef CONFIG_MEMORY_HOTPLUG
+        nid = cpu_to_node(cpu);
+        if (!node_online(nid)) {
+                err = mem_online_node(nid);
+                if (err)
+                        return err;
+        }
+
+        pgdat = NODE_DATA(nid);
+        if (!pgdat) {
+                printk(KERN_ERR
+                        "Can't online cpu %d due to NULL pgdat\n", cpu);
+                return -ENOMEM;
+        }
+
+        if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
+                mutex_lock(&zonelists_mutex);
+                build_all_zonelists(NULL);
+                mutex_unlock(&zonelists_mutex);
+        }
+#endif
+
         cpu_maps_update_begin();
 
         if (cpu_hotplug_disabled) {
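The new CONFIG_MEMORY_HOTPLUG block makes cpu_up() online the CPU's node first, so allocations performed on the bring-up path have a valid pgdat and zonelists to target. Condensed from the hunk above:

int nid = cpu_to_node(cpu);

if (!node_online(nid)) {
        int err = mem_online_node(nid); /* hot-add the node first */
        if (err)
                return err;
}

/* Zonelists for a freshly onlined node are built lazily. */
if (NODE_DATA(nid)->node_zonelists->_zonerefs->zone == NULL) {
        mutex_lock(&zonelists_mutex);
        build_all_zonelists(NULL);
        mutex_unlock(&zonelists_mutex);
}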
@@ -365,11 +394,8 @@ static cpumask_var_t frozen_cpus;
 
 int disable_nonboot_cpus(void)
 {
-        int cpu, first_cpu, error;
+        int cpu, first_cpu, error = 0;
 
-        error = stop_machine_create();
-        if (error)
-                return error;
         cpu_maps_update_begin();
         first_cpu = cpumask_first(cpu_online_mask);
         /*
@@ -400,7 +426,6 @@ int disable_nonboot_cpus(void)
                 printk(KERN_ERR "Non-boot CPUs are not disabled\n");
         }
         cpu_maps_update_done();
-        stop_machine_destroy();
         return error;
 }
 
@@ -467,7 +492,7 @@ void __cpuinit notify_cpu_starting(unsigned int cpu)
         if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
                 val = CPU_STARTING_FROZEN;
 #endif /* CONFIG_PM_SLEEP_SMP */
-        raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
+        cpu_notify(val, (void *)(long)cpu);
 }
 
 #endif /* CONFIG_SMP */
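For context, a notifier that the new cpu_notify() path ends up invoking registers through the unchanged register_cpu_notifier() API. A hedged example (prepare_my_percpu_data() is a hypothetical helper, not part of this patch):

#include <linux/cpu.h>
#include <linux/notifier.h>

static int __cpuinit my_cpu_callback(struct notifier_block *nb,
                                     unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
                /* An errno here fails _cpu_up() via __cpu_notify() and
                 * triggers the CPU_UP_CANCELED rollback seen above. */
                if (prepare_my_percpu_data(cpu))
                        return notifier_from_errno(-ENOMEM);
                break;
        case CPU_DEAD:
                pr_info("cpu %u is gone\n", cpu);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block my_cpu_notifier __cpuinitdata = {
        .notifier_call = my_cpu_callback,
};

/* register_cpu_notifier(&my_cpu_notifier) from module/init code. */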