aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorAshok Raj <ashok.raj@intel.com>2005-11-28 16:43:46 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2005-11-28 17:42:23 -0500
commita9d9baa1e819b2f92f9cfa5240f766c535e636a6 (patch)
tree0ae15e5b1071b395affa0ac9abf6fd746ad60b0e /kernel
parente0f39591cc178026607fcbbe9a53be435fe8285d (diff)
[PATCH] clean up lock_cpu_hotplug() in cpufreq
There are some callers in cpufreq hotplug notify path that the lowest function calls lock_cpu_hotplug(). The lock is already held during cpu_up() and cpu_down() calls when the notify calls are broadcast to registered clients. Ideally if possible, we could disable_preempt() at the highest caller and make sure we don't sleep in the path down in cpufreq->driver_target() calls but the calls are so intertwined and cumbersome to cleanup. Hence we consistently use lock_cpu_hotplug() and unlock_cpu_hotplug() in all places. - Removed export of cpucontrol semaphore and made it static. - removed explicit uses of up/down with lock_cpu_hotplug() so we can keep track of the callers in same thread context and just keep refcounts without calling a down() that causes a deadlock. - Removed current_in_hotplug() uses - Removed PF_HOTPLUG_CPU in sched.h introduced for the current_in_hotplug() temporary workaround. Tested with insmod of cpufreq_stat.ko, and logical online/offline to make sure we don't have any hang situations. Signed-off-by: Ashok Raj <ashok.raj@intel.com> Cc: Zwane Mwaikambo <zwane@linuxpower.ca> Cc: Shaohua Li <shaohua.li@intel.com> Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/cpu.c83
1 files changed, 49 insertions, 34 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index d61ba88f34e5..e882c6babf41 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -16,47 +16,76 @@
16#include <asm/semaphore.h> 16#include <asm/semaphore.h>
17 17
18/* This protects CPUs going up and down... */ 18/* This protects CPUs going up and down... */
19DECLARE_MUTEX(cpucontrol); 19static DECLARE_MUTEX(cpucontrol);
20EXPORT_SYMBOL_GPL(cpucontrol);
21 20
22static struct notifier_block *cpu_chain; 21static struct notifier_block *cpu_chain;
23 22
24/* 23#ifdef CONFIG_HOTPLUG_CPU
25 * Used to check by callers if they need to acquire the cpucontrol 24static struct task_struct *lock_cpu_hotplug_owner;
26 * or not to protect a cpu from being removed. Its sometimes required to 25static int lock_cpu_hotplug_depth;
27 * call these functions both for normal operations, and in response to
28 * a cpu being added/removed. If the context of the call is in the same
29 * thread context as a CPU hotplug thread, we dont need to take the lock
30 * since its already protected
31 * check drivers/cpufreq/cpufreq.c for its usage - Ashok Raj
32 */
33 26
34int current_in_cpu_hotplug(void) 27static int __lock_cpu_hotplug(int interruptible)
35{ 28{
36 return (current->flags & PF_HOTPLUG_CPU); 29 int ret = 0;
30
31 if (lock_cpu_hotplug_owner != current) {
32 if (interruptible)
33 ret = down_interruptible(&cpucontrol);
34 else
35 down(&cpucontrol);
36 }
37
38 /*
39 * Set only if we succeed in locking
40 */
41 if (!ret) {
42 lock_cpu_hotplug_depth++;
43 lock_cpu_hotplug_owner = current;
44 }
45
46 return ret;
37} 47}
38 48
39EXPORT_SYMBOL_GPL(current_in_cpu_hotplug); 49void lock_cpu_hotplug(void)
50{
51 __lock_cpu_hotplug(0);
52}
53EXPORT_SYMBOL_GPL(lock_cpu_hotplug);
40 54
55void unlock_cpu_hotplug(void)
56{
57 if (--lock_cpu_hotplug_depth == 0) {
58 lock_cpu_hotplug_owner = NULL;
59 up(&cpucontrol);
60 }
61}
62EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
63
64int lock_cpu_hotplug_interruptible(void)
65{
66 return __lock_cpu_hotplug(1);
67}
68EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible);
69#endif /* CONFIG_HOTPLUG_CPU */
41 70
42/* Need to know about CPUs going up/down? */ 71/* Need to know about CPUs going up/down? */
43int register_cpu_notifier(struct notifier_block *nb) 72int register_cpu_notifier(struct notifier_block *nb)
44{ 73{
45 int ret; 74 int ret;
46 75
47 if ((ret = down_interruptible(&cpucontrol)) != 0) 76 if ((ret = lock_cpu_hotplug_interruptible()) != 0)
48 return ret; 77 return ret;
49 ret = notifier_chain_register(&cpu_chain, nb); 78 ret = notifier_chain_register(&cpu_chain, nb);
50 up(&cpucontrol); 79 unlock_cpu_hotplug();
51 return ret; 80 return ret;
52} 81}
53EXPORT_SYMBOL(register_cpu_notifier); 82EXPORT_SYMBOL(register_cpu_notifier);
54 83
55void unregister_cpu_notifier(struct notifier_block *nb) 84void unregister_cpu_notifier(struct notifier_block *nb)
56{ 85{
57 down(&cpucontrol); 86 lock_cpu_hotplug();
58 notifier_chain_unregister(&cpu_chain, nb); 87 notifier_chain_unregister(&cpu_chain, nb);
59 up(&cpucontrol); 88 unlock_cpu_hotplug();
60} 89}
61EXPORT_SYMBOL(unregister_cpu_notifier); 90EXPORT_SYMBOL(unregister_cpu_notifier);
62 91
@@ -112,13 +141,6 @@ int cpu_down(unsigned int cpu)
112 goto out; 141 goto out;
113 } 142 }
114 143
115 /*
116 * Leave a trace in current->flags indicating we are already in
117 * process of performing CPU hotplug. Callers can check if cpucontrol
118 * is already acquired by current thread, and if so not cause
119 * a dead lock by not acquiring the lock
120 */
121 current->flags |= PF_HOTPLUG_CPU;
122 err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE, 144 err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
123 (void *)(long)cpu); 145 (void *)(long)cpu);
124 if (err == NOTIFY_BAD) { 146 if (err == NOTIFY_BAD) {
@@ -171,7 +193,6 @@ out_thread:
171out_allowed: 193out_allowed:
172 set_cpus_allowed(current, old_allowed); 194 set_cpus_allowed(current, old_allowed);
173out: 195out:
174 current->flags &= ~PF_HOTPLUG_CPU;
175 unlock_cpu_hotplug(); 196 unlock_cpu_hotplug();
176 return err; 197 return err;
177} 198}
@@ -182,7 +203,7 @@ int __devinit cpu_up(unsigned int cpu)
182 int ret; 203 int ret;
183 void *hcpu = (void *)(long)cpu; 204 void *hcpu = (void *)(long)cpu;
184 205
185 if ((ret = down_interruptible(&cpucontrol)) != 0) 206 if ((ret = lock_cpu_hotplug_interruptible()) != 0)
186 return ret; 207 return ret;
187 208
188 if (cpu_online(cpu) || !cpu_present(cpu)) { 209 if (cpu_online(cpu) || !cpu_present(cpu)) {
@@ -190,11 +211,6 @@ int __devinit cpu_up(unsigned int cpu)
190 goto out; 211 goto out;
191 } 212 }
192 213
193 /*
194 * Leave a trace in current->flags indicating we are already in
195 * process of performing CPU hotplug.
196 */
197 current->flags |= PF_HOTPLUG_CPU;
198 ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu); 214 ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
199 if (ret == NOTIFY_BAD) { 215 if (ret == NOTIFY_BAD) {
200 printk("%s: attempt to bring up CPU %u failed\n", 216 printk("%s: attempt to bring up CPU %u failed\n",
@@ -217,7 +233,6 @@ out_notify:
217 if (ret != 0) 233 if (ret != 0)
218 notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu); 234 notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
219out: 235out:
220 current->flags &= ~PF_HOTPLUG_CPU; 236 unlock_cpu_hotplug();
221 up(&cpucontrol);
222 return ret; 237 return ret;
223} 238}