author     Heiko Carstens <heiko.carstens@de.ibm.com>    2008-12-22 06:36:30 -0500
committer  Rusty Russell <rusty@rustcorp.com.au>         2009-01-04 17:10:14 -0500
commit     9ea09af3bd3090e8349ca2899ca2011bd94cda85
tree       36396347bb750a6aecb0771cfebf9887aaaae492
parent     c298be74492bece102f3379d14015638f1fd1fac
stop_machine: introduce stop_machine_create/destroy.
Introduce stop_machine_create/destroy. With this interface, subsystems
that need a non-failing stop_machine environment can create the
stop_machine threads before actually calling stop_machine (a usage
sketch follows the sign-offs below). When the threads aren't needed
anymore, they can be killed again with stop_machine_destroy.
When stop_machine gets called and the threads aren't present, they
will be created and destroyed automatically. This restores the old
behaviour of stop_machine.
This patch also converts cpu hotplug to the new interface, since it
is special: cpu_down calls __stop_machine instead of stop_machine,
but the kstop threads are only created automatically when
stop_machine gets called.
Changing the code so that the threads would be created automatically
on __stop_machine is currently not possible: when __stop_machine gets
called we hold cpu_add_remove_lock, which is the same lock that
create_rt_workqueue would take. So the workqueue needs to be created
before the cpu hotplug code locks cpu_add_remove_lock.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
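
As a rough illustration of the interface this patch adds, the sketch below shows how a subsystem that needs a non-failing stop_machine environment might use it. It is not part of the patch: the example_* names are made up, and only stop_machine_create(), stop_machine() and stop_machine_destroy() come from the interface introduced here.

#include <linux/stop_machine.h>

/* Runs on one CPU while the remaining CPUs are held with interrupts disabled. */
static int example_apply(void *data)
{
	return 0;
}

static int example_update(void)
{
	int err;

	/*
	 * Create the kstop threads (and the per-cpu work structs) up front,
	 * so the stop_machine call below cannot fail with -ENOMEM.
	 */
	err = stop_machine_create();
	if (err)
		return err;

	err = stop_machine(example_apply, NULL, NULL);

	/* Drop our reference; the threads go away once the last user is gone. */
	stop_machine_destroy();
	return err;
}

Note that, as in the cpu_down() conversion below, stop_machine_create() has to be called before cpu_add_remove_lock is taken (i.e. before cpu_maps_update_begin()), because creating the workqueue takes that same lock.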
-rw-r--r--   include/linux/stop_machine.h   22
-rw-r--r--   kernel/cpu.c                     6
-rw-r--r--   kernel/stop_machine.c           55
3 files changed, 72 insertions, 11 deletions
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 74d59a641362..baba3a23a814 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -35,6 +35,24 @@ int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
  * won't come or go while it's being called. Used by hotplug cpu.
  */
 int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
+
+/**
+ * stop_machine_create: create all stop_machine threads
+ *
+ * Description: This causes all stop_machine threads to be created before
+ * stop_machine actually gets called. This can be used by subsystems that
+ * need a non failing stop_machine infrastructure.
+ */
+int stop_machine_create(void);
+
+/**
+ * stop_machine_destroy: destroy all stop_machine threads
+ *
+ * Description: This causes all stop_machine threads which were created with
+ * stop_machine_create to be destroyed again.
+ */
+void stop_machine_destroy(void);
+
 #else
 
 static inline int stop_machine(int (*fn)(void *), void *data,
@@ -46,5 +64,9 @@ static inline int stop_machine(int (*fn)(void *), void *data,
 	local_irq_enable();
 	return ret;
 }
+
+static inline int stop_machine_create(void) { return 0; }
+static inline void stop_machine_destroy(void) { }
+
 #endif /* CONFIG_SMP */
 #endif /* _LINUX_STOP_MACHINE */
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 47fff3b63cbf..30e74dd6d01b 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -269,8 +269,11 @@ out_release:
 
 int __ref cpu_down(unsigned int cpu)
 {
-	int err = 0;
+	int err;
 
+	err = stop_machine_create();
+	if (err)
+		return err;
 	cpu_maps_update_begin();
 
 	if (cpu_hotplug_disabled) {
@@ -297,6 +300,7 @@ int __ref cpu_down(unsigned int cpu)
 
 out:
 	cpu_maps_update_done();
+	stop_machine_destroy();
 	return err;
 }
 EXPORT_SYMBOL(cpu_down);
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 286c41722e8c..0cd415ee62a2 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -38,7 +38,10 @@ struct stop_machine_data {
 static unsigned int num_threads;
 static atomic_t thread_ack;
 static DEFINE_MUTEX(lock);
-
+/* setup_lock protects refcount, stop_machine_wq and stop_machine_work. */
+static DEFINE_MUTEX(setup_lock);
+/* Users of stop_machine. */
+static int refcount;
 static struct workqueue_struct *stop_machine_wq;
 static struct stop_machine_data active, idle;
 static const cpumask_t *active_cpus;
@@ -109,6 +112,43 @@ static int chill(void *unused)
 	return 0;
 }
 
+int stop_machine_create(void)
+{
+	mutex_lock(&setup_lock);
+	if (refcount)
+		goto done;
+	stop_machine_wq = create_rt_workqueue("kstop");
+	if (!stop_machine_wq)
+		goto err_out;
+	stop_machine_work = alloc_percpu(struct work_struct);
+	if (!stop_machine_work)
+		goto err_out;
+done:
+	refcount++;
+	mutex_unlock(&setup_lock);
+	return 0;
+
+err_out:
+	if (stop_machine_wq)
+		destroy_workqueue(stop_machine_wq);
+	mutex_unlock(&setup_lock);
+	return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(stop_machine_create);
+
+void stop_machine_destroy(void)
+{
+	mutex_lock(&setup_lock);
+	refcount--;
+	if (refcount)
+		goto done;
+	destroy_workqueue(stop_machine_wq);
+	free_percpu(stop_machine_work);
+done:
+	mutex_unlock(&setup_lock);
+}
+EXPORT_SYMBOL_GPL(stop_machine_destroy);
+
 int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
 {
 	struct work_struct *sm_work;
@@ -146,19 +186,14 @@ int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
 {
 	int ret;
 
+	ret = stop_machine_create();
+	if (ret)
+		return ret;
 	/* No CPUs can come up or down during this. */
 	get_online_cpus();
 	ret = __stop_machine(fn, data, cpus);
 	put_online_cpus();
-
+	stop_machine_destroy();
 	return ret;
 }
 EXPORT_SYMBOL_GPL(stop_machine);
-
-static int __init stop_machine_init(void)
-{
-	stop_machine_wq = create_rt_workqueue("kstop");
-	stop_machine_work = alloc_percpu(struct work_struct);
-	return 0;
-}
-core_initcall(stop_machine_init);