| author | Linus Torvalds <torvalds@macmini.osdl.org> | 2006-07-23 15:12:16 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@macmini.osdl.org> | 2006-07-23 15:12:16 -0400 |
| commit | aa95387774039096c11803c04011f1aa42d85758 (patch) | |
| tree | b8d3c845efedc2d67bcaff0038b4306fa375379c /kernel/cpu.c | |
| parent | 2cd7cbdf4bd0d0fe58e4dc903e8b413412595504 (diff) | |
cpu hotplug: simplify and hopefully fix locking

The CPU hotplug locking was quite messy, with a recursive lock used to
handle the fact that both the actual up/down sequence wanted to protect
itself from being re-entered and the callbacks it called tended to want
to protect themselves from CPU events.

This splits the lock into two: one to serialize the whole hotplug
sequence, and one to protect against the CPU present bitmaps changing.
The latter still allows recursive usage because some subsystems (the
ondemand policy for cpufreq, at least) had already gotten too used to
the lax locking, but the locking mistakes are hopefully now less
fundamental, and we now warn about recursive lock usage when we see it,
in the hope that it can be fixed.

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
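
The heart of the change is the recursion-tolerant wrapper around the new
cpu_bitmask_lock: a re-entrant caller is detected by comparing `current`
against the recorded owner, warned about a limited number of times, and then
tolerated by bumping a depth counter instead of deadlocking. Below is a
minimal userspace sketch of that pattern, for illustration only: here
pthread_mutex_t, pthread_self(), and fprintf() stand in for the kernel's
mutex, `current`, and printk()/WARN_ON(), and the owner_valid flag is an
artifact of pthread_t having no equivalent of a NULL task pointer.

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t bitmask_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t owner;          /* plays the role of `recursive` */
static bool owner_valid;         /* pthread_t has no NULL value */
static int recursive_depth;

static void sketch_lock(void)
{
        pthread_t self = pthread_self();

        /* Re-entry by the current holder: warn a few times, then tolerate. */
        if (owner_valid && pthread_equal(owner, self)) {
                static int warnings = 10;
                if (warnings) {
                        fprintf(stderr, "recursive hotplug-style locking detected\n");
                        warnings--;
                }
                recursive_depth++;
                return;
        }
        pthread_mutex_lock(&bitmask_lock);
        owner = self;
        owner_valid = true;
}

static void sketch_unlock(void)
{
        /* Analogue of the patch's WARN_ON(recursive != current). */
        if (!owner_valid || !pthread_equal(owner, pthread_self()))
                fprintf(stderr, "unlock by non-owner\n");

        /* Unwind recursive acquisitions before releasing the real mutex. */
        if (recursive_depth) {
                recursive_depth--;
                return;
        }
        owner_valid = false;
        pthread_mutex_unlock(&bitmask_lock);
}

int main(void)
{
        sketch_lock();
        sketch_lock();          /* recursive: warns and bumps the depth */
        sketch_unlock();
        sketch_unlock();
        return 0;
}
```

Note that the read of the owner field outside the mutex mirrors the patch's
deliberately lock-free read of `recursive`: it is benign in the same way,
since a thread can only match its own ID, though a strictly conforming C
program would use atomics for it.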
Diffstat (limited to 'kernel/cpu.c')

 -rw-r--r--  kernel/cpu.c | 75

1 file changed, 34 insertions(+), 41 deletions(-)
```diff
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 70fbf2e83766..f230f9ae01c2 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -16,56 +16,48 @@
 #include <linux/mutex.h>
 
 /* This protects CPUs going up and down... */
-static DEFINE_MUTEX(cpucontrol);
+static DEFINE_MUTEX(cpu_add_remove_lock);
+static DEFINE_MUTEX(cpu_bitmask_lock);
 
 static __cpuinitdata BLOCKING_NOTIFIER_HEAD(cpu_chain);
 
 #ifdef CONFIG_HOTPLUG_CPU
-static struct task_struct *lock_cpu_hotplug_owner;
-static int lock_cpu_hotplug_depth;
 
-static int __lock_cpu_hotplug(int interruptible)
-{
-        int ret = 0;
-
-        if (lock_cpu_hotplug_owner != current) {
-                if (interruptible)
-                        ret = mutex_lock_interruptible(&cpucontrol);
-                else
-                        mutex_lock(&cpucontrol);
-        }
-
-        /*
-         * Set only if we succeed in locking
-         */
-        if (!ret) {
-                lock_cpu_hotplug_depth++;
-                lock_cpu_hotplug_owner = current;
-        }
-
-        return ret;
-}
+/* Crappy recursive lock-takers in cpufreq! Complain loudly about idiots */
+static struct task_struct *recursive;
+static int recursive_depth;
 
 void lock_cpu_hotplug(void)
 {
-        __lock_cpu_hotplug(0);
+        struct task_struct *tsk = current;
+
+        if (tsk == recursive) {
+                static int warnings = 10;
+                if (warnings) {
+                        printk(KERN_ERR "Lukewarm IQ detected in hotplug locking\n");
+                        WARN_ON(1);
+                        warnings--;
+                }
+                recursive_depth++;
+                return;
+        }
+        mutex_lock(&cpu_bitmask_lock);
+        recursive = tsk;
 }
 EXPORT_SYMBOL_GPL(lock_cpu_hotplug);
 
 void unlock_cpu_hotplug(void)
 {
-        if (--lock_cpu_hotplug_depth == 0) {
-                lock_cpu_hotplug_owner = NULL;
-                mutex_unlock(&cpucontrol);
+        WARN_ON(recursive != current);
+        if (recursive_depth) {
+                recursive_depth--;
+                return;
         }
+        mutex_unlock(&cpu_bitmask_lock);
+        recursive = NULL;
 }
 EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
 
-int lock_cpu_hotplug_interruptible(void)
-{
-        return __lock_cpu_hotplug(1);
-}
-EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible);
 #endif /* CONFIG_HOTPLUG_CPU */
 
 /* Need to know about CPUs going up/down? */
@@ -122,9 +114,7 @@ int cpu_down(unsigned int cpu)
         struct task_struct *p;
         cpumask_t old_allowed, tmp;
 
-        if ((err = lock_cpu_hotplug_interruptible()) != 0)
-                return err;
-
+        mutex_lock(&cpu_add_remove_lock);
         if (num_online_cpus() == 1) {
                 err = -EBUSY;
                 goto out;
@@ -150,7 +140,10 @@ int cpu_down(unsigned int cpu)
         cpu_clear(cpu, tmp);
         set_cpus_allowed(current, tmp);
 
+        mutex_lock(&cpu_bitmask_lock);
         p = __stop_machine_run(take_cpu_down, NULL, cpu);
+        mutex_unlock(&cpu_bitmask_lock);
+
         if (IS_ERR(p)) {
                 /* CPU didn't die: tell everyone. Can't complain. */
                 if (blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
@@ -187,7 +180,7 @@ out_thread:
 out_allowed:
         set_cpus_allowed(current, old_allowed);
 out:
-        unlock_cpu_hotplug();
+        mutex_unlock(&cpu_add_remove_lock);
         return err;
 }
 #endif /*CONFIG_HOTPLUG_CPU*/
@@ -197,9 +190,7 @@ int __devinit cpu_up(unsigned int cpu)
         int ret;
         void *hcpu = (void *)(long)cpu;
 
-        if ((ret = lock_cpu_hotplug_interruptible()) != 0)
-                return ret;
-
+        mutex_lock(&cpu_add_remove_lock);
         if (cpu_online(cpu) || !cpu_present(cpu)) {
                 ret = -EINVAL;
                 goto out;
@@ -214,7 +205,9 @@ int __devinit cpu_up(unsigned int cpu)
         }
 
         /* Arch-specific enabling code. */
+        mutex_lock(&cpu_bitmask_lock);
         ret = __cpu_up(cpu);
+        mutex_unlock(&cpu_bitmask_lock);
         if (ret != 0)
                 goto out_notify;
         BUG_ON(!cpu_online(cpu));
@@ -227,6 +220,6 @@ out_notify:
         blocking_notifier_call_chain(&cpu_chain,
                         CPU_UP_CANCELED, hcpu);
 out:
-        unlock_cpu_hotplug();
+        mutex_unlock(&cpu_add_remove_lock);
         return ret;
 }
```
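
For the reader side that made recursion worth tolerating at all, a subsystem
callback would now look roughly like the sketch below. This is illustrative,
not from the patch: my_per_cpu_init() and its body are invented, and only
lock_cpu_hotplug(), unlock_cpu_hotplug(), and for_each_online_cpu() come from
the kernel APIs of this era.

```c
#include <linux/cpu.h>
#include <linux/cpumask.h>

/*
 * Hypothetical reader: walk the online CPUs while holding
 * cpu_bitmask_lock (via lock_cpu_hotplug()), so the bitmap cannot
 * change underneath the loop. After this patch the up/down paths
 * hold that lock only around the steps that actually change the
 * bitmaps (__cpu_up() and __stop_machine_run()), while the whole
 * hotplug sequence is serialized separately by cpu_add_remove_lock.
 */
static void my_per_cpu_init(void)
{
        int cpu;

        lock_cpu_hotplug();
        for_each_online_cpu(cpu) {
                /* ... set up per-CPU state for `cpu' ... */
        }
        unlock_cpu_hotplug();
}
```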