author    Thomas Gleixner <tglx@linutronix.de>  2012-07-16 06:42:36 -0400
committer Thomas Gleixner <tglx@linutronix.de>  2012-08-13 11:01:07 -0400
commit    f97f8f06a49febbc3cb3635172efbe64ddc79700
tree      7917324eb3e1c36963a8c60c6a5601708b3ef208 /kernel/smpboot.c
parent    2a1d446019f9a5983ec5a335b95e8593fdb6fa2e
smpboot: Provide infrastructure for percpu hotplug threads
Provide a generic interface for setting up and tearing down percpu threads. On registration the threads for already online cpus are created and started. On deregistration (modules) the threads are stopped. During hotplug operations the threads are created, started, parked and unparked.

The data structure for registration provides a pointer to percpu storage space and optional setup, cleanup, park and unpark functions. These functions are called when the thread state changes. Each implementation has to provide a function which is queried and returns whether the thread should run, plus the thread function itself. The core code handles all state transitions and avoids duplicated code in the call sites.

[ paulmck: Preemption leak fix ]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: http://lkml.kernel.org/r/20120716103948.352501068@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
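For illustration, here is a minimal, hypothetical sketch of a client registering a percpu hotplug thread with the new interface. It assumes the struct smp_hotplug_thread layout (store, thread_should_run, thread_fn, thread_comm, ...) added by the companion include/linux/smpboot.h change in this series, which is not part of this diff; all demo_* names are illustrative only and not part of the patch.

/* Hypothetical client of the percpu hotplug thread infrastructure. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smpboot.h>

static DEFINE_PER_CPU(struct task_struct *, demo_task);
static DEFINE_PER_CPU(unsigned int, demo_pending);

/* Queried by the core loop; non-zero means thread_fn should be called. */
static int demo_thread_should_run(unsigned int cpu)
{
	return per_cpu(demo_pending, cpu);
}

/* Runs on the cpu the thread is bound to, with preemption enabled. */
static void demo_thread_fn(unsigned int cpu)
{
	per_cpu(demo_pending, cpu) = 0;
}

static struct smp_hotplug_thread demo_threads = {
	.store			= &demo_task,
	.thread_should_run	= demo_thread_should_run,
	.thread_fn		= demo_thread_fn,
	.thread_comm		= "demo/%u",
};

static int __init demo_init(void)
{
	/*
	 * Creates and unparks one "demo/N" thread per online cpu;
	 * subsequent hotplug transitions are handled by the smpboot core.
	 */
	return smpboot_register_percpu_thread(&demo_threads);
}
module_init(demo_init);

The park/unpark callbacks are optional; a client only supplies them if it has per-cpu state to quiesce or restore around cpu hotplug.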
Diffstat (limited to 'kernel/smpboot.c')
-rw-r--r--  kernel/smpboot.c | 229
1 file changed, 229 insertions(+), 0 deletions(-)
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 98f60c5caa1b..9d5f7b04025d 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -1,11 +1,17 @@
 /*
  * Common SMP CPU bringup/teardown functions
  */
+#include <linux/cpu.h>
 #include <linux/err.h>
 #include <linux/smp.h>
 #include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/export.h>
 #include <linux/percpu.h>
+#include <linux/kthread.h>
+#include <linux/smpboot.h>
 
 #include "smpboot.h"
 
@@ -65,3 +71,226 @@ void __init idle_threads_init(void)
 	}
 }
 #endif
+
+static LIST_HEAD(hotplug_threads);
+static DEFINE_MUTEX(smpboot_threads_lock);
+
+struct smpboot_thread_data {
+	unsigned int			cpu;
+	unsigned int			status;
+	struct smp_hotplug_thread	*ht;
+};
+
+enum {
+	HP_THREAD_NONE = 0,
+	HP_THREAD_ACTIVE,
+	HP_THREAD_PARKED,
+};
+
+/**
+ * smpboot_thread_fn - percpu hotplug thread loop function
+ * @data:	thread data pointer
+ *
+ * Checks for thread stop and park conditions. Calls the necessary
+ * setup, cleanup, park and unpark functions for the registered
+ * thread.
+ *
+ * Returns 1 when the thread should exit, 0 otherwise.
+ */
+static int smpboot_thread_fn(void *data)
+{
+	struct smpboot_thread_data *td = data;
+	struct smp_hotplug_thread *ht = td->ht;
+
+	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		preempt_disable();
+		if (kthread_should_stop()) {
+			set_current_state(TASK_RUNNING);
+			preempt_enable();
+			if (ht->cleanup)
+				ht->cleanup(td->cpu, cpu_online(td->cpu));
+			kfree(td);
+			return 0;
+		}
+
+		if (kthread_should_park()) {
+			__set_current_state(TASK_RUNNING);
+			preempt_enable();
+			if (ht->park && td->status == HP_THREAD_ACTIVE) {
+				BUG_ON(td->cpu != smp_processor_id());
+				ht->park(td->cpu);
+				td->status = HP_THREAD_PARKED;
+			}
+			kthread_parkme();
+			/* We might have been woken for stop */
+			continue;
+		}
+
+		BUG_ON(td->cpu != smp_processor_id());
+
+		/* Check for state change setup */
+		switch (td->status) {
+		case HP_THREAD_NONE:
+			preempt_enable();
+			if (ht->setup)
+				ht->setup(td->cpu);
+			td->status = HP_THREAD_ACTIVE;
+			preempt_disable();
+			break;
+		case HP_THREAD_PARKED:
+			preempt_enable();
+			if (ht->unpark)
+				ht->unpark(td->cpu);
+			td->status = HP_THREAD_ACTIVE;
+			preempt_disable();
+			break;
+		}
+
+		if (!ht->thread_should_run(td->cpu)) {
+			preempt_enable();
+			schedule();
+		} else {
+			set_current_state(TASK_RUNNING);
+			preempt_enable();
+			ht->thread_fn(td->cpu);
+		}
+	}
+}
+
+static int
+__smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
+{
+	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
+	struct smpboot_thread_data *td;
+
+	if (tsk)
+		return 0;
+
+	td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
+	if (!td)
+		return -ENOMEM;
+	td->cpu = cpu;
+	td->ht = ht;
+
+	tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
+				    ht->thread_comm);
+	if (IS_ERR(tsk)) {
+		kfree(td);
+		return PTR_ERR(tsk);
+	}
+
+	get_task_struct(tsk);
+	*per_cpu_ptr(ht->store, cpu) = tsk;
+	return 0;
+}
+
+int smpboot_create_threads(unsigned int cpu)
+{
+	struct smp_hotplug_thread *cur;
+	int ret = 0;
+
+	mutex_lock(&smpboot_threads_lock);
+	list_for_each_entry(cur, &hotplug_threads, list) {
+		ret = __smpboot_create_thread(cur, cpu);
+		if (ret)
+			break;
+	}
+	mutex_unlock(&smpboot_threads_lock);
+	return ret;
+}
+
+static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
+{
+	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
+
+	kthread_unpark(tsk);
+}
+
+void smpboot_unpark_threads(unsigned int cpu)
+{
+	struct smp_hotplug_thread *cur;
+
+	mutex_lock(&smpboot_threads_lock);
+	list_for_each_entry(cur, &hotplug_threads, list)
+		smpboot_unpark_thread(cur, cpu);
+	mutex_unlock(&smpboot_threads_lock);
+}
+
+static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
+{
+	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
+
+	if (tsk)
+		kthread_park(tsk);
+}
+
+void smpboot_park_threads(unsigned int cpu)
+{
+	struct smp_hotplug_thread *cur;
+
+	mutex_lock(&smpboot_threads_lock);
+	list_for_each_entry_reverse(cur, &hotplug_threads, list)
+		smpboot_park_thread(cur, cpu);
+	mutex_unlock(&smpboot_threads_lock);
+}
+
+static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
+{
+	unsigned int cpu;
+
+	/* We need to destroy also the parked threads of offline cpus */
+	for_each_possible_cpu(cpu) {
+		struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
+
+		if (tsk) {
+			kthread_stop(tsk);
+			put_task_struct(tsk);
+			*per_cpu_ptr(ht->store, cpu) = NULL;
+		}
+	}
+}
+
+/**
+ * smpboot_register_percpu_thread - Register a per_cpu thread related to hotplug
+ * @plug_thread:	Hotplug thread descriptor
+ *
+ * Creates and starts the threads on all online cpus.
+ */
+int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
+{
+	unsigned int cpu;
+	int ret = 0;
+
+	mutex_lock(&smpboot_threads_lock);
+	for_each_online_cpu(cpu) {
+		ret = __smpboot_create_thread(plug_thread, cpu);
+		if (ret) {
+			smpboot_destroy_threads(plug_thread);
+			goto out;
+		}
+		smpboot_unpark_thread(plug_thread, cpu);
+	}
+	list_add(&plug_thread->list, &hotplug_threads);
+out:
+	mutex_unlock(&smpboot_threads_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
+
+/**
+ * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
+ * @plug_thread:	Hotplug thread descriptor
+ *
+ * Stops all threads on all possible cpus.
+ */
+void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
+{
+	get_online_cpus();
+	mutex_lock(&smpboot_threads_lock);
+	list_del(&plug_thread->list);
+	smpboot_destroy_threads(plug_thread);
+	mutex_unlock(&smpboot_threads_lock);
+	put_online_cpus();
+}
+EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
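For completeness, the teardown side for a modular user would be the mirror image of the registration sketch shown after the commit message, again using the hypothetical demo_threads descriptor from that sketch: smpboot_unregister_percpu_thread() stops and releases the threads on all possible cpus, including the parked threads of currently offline cpus.

/* Continuing the same hypothetical module as above. */
static void __exit demo_exit(void)
{
	/*
	 * Stops the per-cpu threads on all possible cpus and clears
	 * the per-cpu task pointers via smpboot_destroy_threads().
	 */
	smpboot_unregister_percpu_thread(&demo_threads);
}
module_exit(demo_exit);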