diff options
Diffstat (limited to 'kernel/cpu.c')
-rw-r--r-- | kernel/cpu.c | 44 |
1 file changed, 17 insertions, 27 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c index 2011ad8d2697..c77bc3a1c722 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
@@ -33,17 +33,13 @@ static struct { | |||
33 | * an ongoing cpu hotplug operation. | 33 | * an ongoing cpu hotplug operation. |
34 | */ | 34 | */ |
35 | int refcount; | 35 | int refcount; |
36 | wait_queue_head_t writer_queue; | ||
37 | } cpu_hotplug; | 36 | } cpu_hotplug; |
38 | 37 | ||
39 | #define writer_exists() (cpu_hotplug.active_writer != NULL) | ||
40 | |||
41 | void __init cpu_hotplug_init(void) | 38 | void __init cpu_hotplug_init(void) |
42 | { | 39 | { |
43 | cpu_hotplug.active_writer = NULL; | 40 | cpu_hotplug.active_writer = NULL; |
44 | mutex_init(&cpu_hotplug.lock); | 41 | mutex_init(&cpu_hotplug.lock); |
45 | cpu_hotplug.refcount = 0; | 42 | cpu_hotplug.refcount = 0; |
46 | init_waitqueue_head(&cpu_hotplug.writer_queue); | ||
47 | } | 43 | } |
48 | 44 | ||
49 | #ifdef CONFIG_HOTPLUG_CPU | 45 | #ifdef CONFIG_HOTPLUG_CPU |
@@ -65,11 +61,8 @@ void put_online_cpus(void) | |||
65 | if (cpu_hotplug.active_writer == current) | 61 | if (cpu_hotplug.active_writer == current) |
66 | return; | 62 | return; |
67 | mutex_lock(&cpu_hotplug.lock); | 63 | mutex_lock(&cpu_hotplug.lock); |
68 | cpu_hotplug.refcount--; | 64 | if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer)) |
69 | 65 | wake_up_process(cpu_hotplug.active_writer); | |
70 | if (unlikely(writer_exists()) && !cpu_hotplug.refcount) | ||
71 | wake_up(&cpu_hotplug.writer_queue); | ||
72 | |||
73 | mutex_unlock(&cpu_hotplug.lock); | 66 | mutex_unlock(&cpu_hotplug.lock); |
74 | 67 | ||
75 | } | 68 | } |
@@ -98,8 +91,8 @@ void cpu_maps_update_done(void) | |||
98 | * Note that during a cpu-hotplug operation, the new readers, if any, | 91 | * Note that during a cpu-hotplug operation, the new readers, if any, |
99 | * will be blocked by the cpu_hotplug.lock | 92 | * will be blocked by the cpu_hotplug.lock |
100 | * | 93 | * |
101 | * Since cpu_maps_update_begin is always called after invoking | 94 | * Since cpu_hotplug_begin() is always called after invoking |
102 | * cpu_maps_update_begin, we can be sure that only one writer is active. | 95 | * cpu_maps_update_begin(), we can be sure that only one writer is active. |
103 | * | 96 | * |
104 | * Note that theoretically, there is a possibility of a livelock: | 97 | * Note that theoretically, there is a possibility of a livelock: |
105 | * - Refcount goes to zero, last reader wakes up the sleeping | 98 | * - Refcount goes to zero, last reader wakes up the sleeping |
@@ -115,19 +108,16 @@ void cpu_maps_update_done(void) | |||
115 | */ | 108 | */ |
116 | static void cpu_hotplug_begin(void) | 109 | static void cpu_hotplug_begin(void) |
117 | { | 110 | { |
118 | DECLARE_WAITQUEUE(wait, current); | ||
119 | |||
120 | mutex_lock(&cpu_hotplug.lock); | ||
121 | |||
122 | cpu_hotplug.active_writer = current; | 111 | cpu_hotplug.active_writer = current; |
123 | add_wait_queue_exclusive(&cpu_hotplug.writer_queue, &wait); | 112 | |
124 | while (cpu_hotplug.refcount) { | 113 | for (;;) { |
125 | set_current_state(TASK_UNINTERRUPTIBLE); | 114 | mutex_lock(&cpu_hotplug.lock); |
115 | if (likely(!cpu_hotplug.refcount)) | ||
116 | break; | ||
117 | __set_current_state(TASK_UNINTERRUPTIBLE); | ||
126 | mutex_unlock(&cpu_hotplug.lock); | 118 | mutex_unlock(&cpu_hotplug.lock); |
127 | schedule(); | 119 | schedule(); |
128 | mutex_lock(&cpu_hotplug.lock); | ||
129 | } | 120 | } |
130 | remove_wait_queue_locked(&cpu_hotplug.writer_queue, &wait); | ||
131 | } | 121 | } |
132 | 122 | ||
133 | static void cpu_hotplug_done(void) | 123 | static void cpu_hotplug_done(void) |
@@ -136,7 +126,7 @@ static void cpu_hotplug_done(void) | |||
136 | mutex_unlock(&cpu_hotplug.lock); | 126 | mutex_unlock(&cpu_hotplug.lock); |
137 | } | 127 | } |
138 | /* Need to know about CPUs going up/down? */ | 128 | /* Need to know about CPUs going up/down? */ |
139 | int __cpuinit register_cpu_notifier(struct notifier_block *nb) | 129 | int __ref register_cpu_notifier(struct notifier_block *nb) |
140 | { | 130 | { |
141 | int ret; | 131 | int ret; |
142 | cpu_maps_update_begin(); | 132 | cpu_maps_update_begin(); |
@@ -149,7 +139,7 @@ int __cpuinit register_cpu_notifier(struct notifier_block *nb) | |||
149 | 139 | ||
150 | EXPORT_SYMBOL(register_cpu_notifier); | 140 | EXPORT_SYMBOL(register_cpu_notifier); |
151 | 141 | ||
152 | void unregister_cpu_notifier(struct notifier_block *nb) | 142 | void __ref unregister_cpu_notifier(struct notifier_block *nb) |
153 | { | 143 | { |
154 | cpu_maps_update_begin(); | 144 | cpu_maps_update_begin(); |
155 | raw_notifier_chain_unregister(&cpu_chain, nb); | 145 | raw_notifier_chain_unregister(&cpu_chain, nb); |
@@ -180,7 +170,7 @@ struct take_cpu_down_param { | |||
180 | }; | 170 | }; |
181 | 171 | ||
182 | /* Take this CPU down. */ | 172 | /* Take this CPU down. */ |
183 | static int take_cpu_down(void *_param) | 173 | static int __ref take_cpu_down(void *_param) |
184 | { | 174 | { |
185 | struct take_cpu_down_param *param = _param; | 175 | struct take_cpu_down_param *param = _param; |
186 | int err; | 176 | int err; |
@@ -199,7 +189,7 @@ static int take_cpu_down(void *_param) | |||
199 | } | 189 | } |
200 | 190 | ||
201 | /* Requires cpu_add_remove_lock to be held */ | 191 | /* Requires cpu_add_remove_lock to be held */ |
202 | static int _cpu_down(unsigned int cpu, int tasks_frozen) | 192 | static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) |
203 | { | 193 | { |
204 | int err, nr_calls = 0; | 194 | int err, nr_calls = 0; |
205 | struct task_struct *p; | 195 | struct task_struct *p; |
@@ -225,7 +215,7 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen) | |||
225 | __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod, | 215 | __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod, |
226 | hcpu, nr_calls, NULL); | 216 | hcpu, nr_calls, NULL); |
227 | printk("%s: attempt to take down CPU %u failed\n", | 217 | printk("%s: attempt to take down CPU %u failed\n", |
228 | __FUNCTION__, cpu); | 218 | __func__, cpu); |
229 | err = -EINVAL; | 219 | err = -EINVAL; |
230 | goto out_release; | 220 | goto out_release; |
231 | } | 221 | } |
@@ -274,7 +264,7 @@ out_release: | |||
274 | return err; | 264 | return err; |
275 | } | 265 | } |
276 | 266 | ||
277 | int cpu_down(unsigned int cpu) | 267 | int __ref cpu_down(unsigned int cpu) |
278 | { | 268 | { |
279 | int err = 0; | 269 | int err = 0; |
280 | 270 | ||
@@ -305,7 +295,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen) | |||
305 | if (ret == NOTIFY_BAD) { | 295 | if (ret == NOTIFY_BAD) { |
306 | nr_calls--; | 296 | nr_calls--; |
307 | printk("%s: attempt to bring up CPU %u failed\n", | 297 | printk("%s: attempt to bring up CPU %u failed\n", |
308 | __FUNCTION__, cpu); | 298 | __func__, cpu); |
309 | ret = -EINVAL; | 299 | ret = -EINVAL; |
310 | goto out_notify; | 300 | goto out_notify; |
311 | } | 301 | } |