aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/cpuset.c
diff options
context:
space:
mode:
authorGautham R Shenoy <ego@in.ibm.com>2008-01-25 15:08:02 -0500
committerIngo Molnar <mingo@elte.hu>2008-01-25 15:08:02 -0500
commit86ef5c9a8edd78e6bf92879f32329d89b2d55b5a (patch)
tree7bf46885326a6fdbb0c3596855408e9a5634dd3a /kernel/cpuset.c
parentd221938c049f4845da13c8593132595a6b9222a8 (diff)
cpu-hotplug: replace lock_cpu_hotplug() with get_online_cpus()
Replace all lock_cpu_hotplug/unlock_cpu_hotplug from the kernel and use get_online_cpus and put_online_cpus instead as it highlights the refcount semantics in these operations. The new API guarantees protection against the cpu-hotplug operation, but it doesn't guarantee serialized access to any of the local data structures. Hence the changes need to be reviewed. In case of pseries_add_processor/pseries_remove_processor, use cpu_maps_update_begin()/cpu_maps_update_done() as we're modifying the cpu_present_map there. Signed-off-by: Gautham R Shenoy <ego@in.ibm.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r--kernel/cpuset.c14
1 file changed, 7 insertions, 7 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 50f5dc46368..cfaf6419d81 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -537,10 +537,10 @@ static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
537 * 537 *
538 * Call with cgroup_mutex held. May take callback_mutex during 538 * Call with cgroup_mutex held. May take callback_mutex during
539 * call due to the kfifo_alloc() and kmalloc() calls. May nest 539 * call due to the kfifo_alloc() and kmalloc() calls. May nest
540 * a call to the lock_cpu_hotplug()/unlock_cpu_hotplug() pair. 540 * a call to the get_online_cpus()/put_online_cpus() pair.
541 * Must not be called holding callback_mutex, because we must not 541 * Must not be called holding callback_mutex, because we must not
542 * call lock_cpu_hotplug() while holding callback_mutex. Elsewhere 542 * call get_online_cpus() while holding callback_mutex. Elsewhere
543 * the kernel nests callback_mutex inside lock_cpu_hotplug() calls. 543 * the kernel nests callback_mutex inside get_online_cpus() calls.
544 * So the reverse nesting would risk an ABBA deadlock. 544 * So the reverse nesting would risk an ABBA deadlock.
545 * 545 *
546 * The three key local variables below are: 546 * The three key local variables below are:
@@ -691,9 +691,9 @@ restart:
691 691
692rebuild: 692rebuild:
693 /* Have scheduler rebuild sched domains */ 693 /* Have scheduler rebuild sched domains */
694 lock_cpu_hotplug(); 694 get_online_cpus();
695 partition_sched_domains(ndoms, doms); 695 partition_sched_domains(ndoms, doms);
696 unlock_cpu_hotplug(); 696 put_online_cpus();
697 697
698done: 698done:
699 if (q && !IS_ERR(q)) 699 if (q && !IS_ERR(q))
@@ -1617,10 +1617,10 @@ static struct cgroup_subsys_state *cpuset_create(
1617 * 1617 *
1618 * If the cpuset being removed has its flag 'sched_load_balance' 1618 * If the cpuset being removed has its flag 'sched_load_balance'
1619 * enabled, then simulate turning sched_load_balance off, which 1619 * enabled, then simulate turning sched_load_balance off, which
1620 * will call rebuild_sched_domains(). The lock_cpu_hotplug() 1620 * will call rebuild_sched_domains(). The get_online_cpus()
1621 * call in rebuild_sched_domains() must not be made while holding 1621 * call in rebuild_sched_domains() must not be made while holding
1622 * callback_mutex. Elsewhere the kernel nests callback_mutex inside 1622 * callback_mutex. Elsewhere the kernel nests callback_mutex inside
1623 * lock_cpu_hotplug() calls. So the reverse nesting would risk an 1623 * get_online_cpus() calls. So the reverse nesting would risk an
1624 * ABBA deadlock. 1624 * ABBA deadlock.
1625 */ 1625 */
1626 1626