author    | Gautham R Shenoy <ego@in.ibm.com>                    | 2007-05-09 05:34:03 -0400
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-09 15:30:51 -0400
commit    | baaca49f415b25fdbe2a8f3c22b39929e450fbfd
tree      | b152b4235fc07fe56619bd3a2e975e5ca90b8c5b /kernel
parent    | 6f7cc11aa6c7d5002e16096c7590944daece70ed
Define and use new events, CPU_LOCK_ACQUIRE and CPU_LOCK_RELEASE
This is an attempt to provide an alternative mechanism for postponing a
hotplug event, instead of using a global mechanism like lock_cpu_hotplug.
The proposal is to add two new events, namely CPU_LOCK_ACQUIRE and
CPU_LOCK_RELEASE. The notifications for these two events are sent out
before and after a cpu-hotplug event, respectively.
During the CPU_LOCK_ACQUIRE event, a cpu-hotplug-aware subsystem is
supposed to acquire its per-subsystem hotcpu mutex (e.g. workqueue_mutex
in kernel/workqueue.c).
During the CPU_LOCK_RELEASE event, the cpu-hotplug-aware subsystem is
supposed to release that per-subsystem hotcpu mutex.
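For illustration, a cpu-hotplug-aware subsystem would wire the new events
into its notifier callback roughly as follows. This is a minimal sketch
only; example_mutex, example_cpu_callback and example_cpu_nb are
hypothetical names, the real in-tree counterpart being the
workqueue_mutex handling in kernel/workqueue.c.

	#include <linux/cpu.h>
	#include <linux/mutex.h>
	#include <linux/notifier.h>

	/* hypothetical per-subsystem hotcpu mutex */
	static DEFINE_MUTEX(example_mutex);

	static int example_cpu_callback(struct notifier_block *nb,
					unsigned long action, void *hcpu)
	{
		switch (action) {
		case CPU_LOCK_ACQUIRE:
			/* sent before CPU_UP_PREPARE/CPU_DOWN_PREPARE */
			mutex_lock(&example_mutex);
			break;
		case CPU_LOCK_RELEASE:
			/* sent after CPU_ONLINE/CPU_DEAD/CPU_*_FAILED */
			mutex_unlock(&example_mutex);
			break;
		default:
			/* per-cpu setup/teardown runs with example_mutex held */
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block example_cpu_nb = {
		.notifier_call = example_cpu_callback,
	};

Registering the block with register_cpu_notifier(&example_cpu_nb) is
enough for the callback to see both the new lock events and the existing
hotplug events.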
The reasons for defining new events, as opposed to reusing existing
events like CPU_UP_PREPARE/CPU_UP_FAILED/CPU_ONLINE for locking/unlocking
the per-subsystem hotcpu mutexes, are as follows:
- CPU_LOCK_ACQUIRE: all hotcpu mutexes are taken before any subsystem
starts handling pre-hotplug events like CPU_UP_PREPARE/CPU_DOWN_PREPARE,
thus ensuring clean handling of these events.
- CPU_LOCK_RELEASE: the hotcpu mutexes are released only after all
subsystems have handled post-hotplug events like CPU_DOWN_FAILED,
CPU_DEAD and CPU_ONLINE, thereby ensuring that there are no subsequent
clashes amongst interdependent subsystems after a cpu hotplug.
This patch also uses __raw_notifier_call_chain in _cpu_up to take care
of the dependency between the two consecutive calls to
raw_notifier_call_chain.
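For context, the extended chain helper used here reports how many
notifier callbacks were actually invoked, so the failure path can unwind
only those. Its signature in this kernel series (shown here purely as a
reference, not part of this patch) is:

	int __raw_notifier_call_chain(struct raw_notifier_head *nh,
			unsigned long val, void *v,
			int nr_to_call, int *nr_calls);

Passing nr_to_call == -1 means call every registered notifier; the
resulting nr_calls count is then fed back so that CPU_UP_CANCELED is
delivered only to the callbacks that already saw CPU_UP_PREPARE.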
[akpm@linux-foundation.org: fix a bug]
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/cpu.c | 19
1 file changed, 14 insertions, 5 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 36e70845cfc3..48810498b355 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -132,12 +132,15 @@ static int _cpu_down(unsigned int cpu)
 	if (!cpu_online(cpu))
 		return -EINVAL;
 
+	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE,
+						(void *)(long)cpu);
 	err = raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
 						(void *)(long)cpu);
 	if (err == NOTIFY_BAD) {
 		printk("%s: attempt to take down CPU %u failed\n",
 				__FUNCTION__, cpu);
-		return -EINVAL;
+		err = -EINVAL;
+		goto out_release;
 	}
 
 	/* Ensure that we are not runnable on dying cpu */
@@ -185,6 +188,9 @@ out_thread:
 	err = kthread_stop(p);
 out_allowed:
 	set_cpus_allowed(current, old_allowed);
+out_release:
+	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE,
+						(void *)(long)cpu);
 	return err;
 }
 
@@ -206,13 +212,15 @@ int cpu_down(unsigned int cpu)
 /* Requires cpu_add_remove_lock to be held */
 static int __cpuinit _cpu_up(unsigned int cpu)
 {
-	int ret;
+	int ret, nr_calls = 0;
 	void *hcpu = (void *)(long)cpu;
 
 	if (cpu_online(cpu) || !cpu_present(cpu))
 		return -EINVAL;
 
-	ret = raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
+	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
+	ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu,
+							-1, &nr_calls);
 	if (ret == NOTIFY_BAD) {
 		printk("%s: attempt to bring up CPU %u failed\n",
 				__FUNCTION__, cpu);
@@ -233,8 +241,9 @@ static int __cpuinit _cpu_up(unsigned int cpu)
 
 out_notify:
 	if (ret != 0)
-		raw_notifier_call_chain(&cpu_chain,
-				CPU_UP_CANCELED, hcpu);
+		__raw_notifier_call_chain(&cpu_chain,
+				CPU_UP_CANCELED, hcpu, nr_calls, NULL);
+	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
 
 	return ret;
 }