author		Thomas Gleixner <tglx@linutronix.de>	2012-07-16 06:42:36 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2012-08-13 11:01:06 -0400
commit		2a1d446019f9a5983ec5a335b95e8593fdb6fa2e (patch)
tree		d4867984d3655cc2e0805534601b94c7ab92bcd7 /kernel/kthread.c
parent		5d01bbd111d6ff9ea9d9847774f66dff39633776 (diff)

kthread: Implement park/unpark facility

To avoid the full teardown/setup of per cpu kthreads in the case of cpu
hot(un)plug, provide a facility which allows to put the kthread into a
park position and unpark it when the cpu comes online again.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/20120716103948.236618824@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
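As context for the diff below, a minimal sketch (not part of this commit) of how cpu hotplug
code might drive the new facility. The my_* names are made up for illustration; only
kthread_create_on_cpu(), kthread_park() and kthread_unpark() are introduced by this patch,
and their declarations are assumed to come from the linux/kthread.h update that accompanies
this change.

    /* Hypothetical hotplug-side usage; only the kthread_* calls come from this patch. */
    #include <linux/kthread.h>
    #include <linux/err.h>

    static struct task_struct *my_task;		/* hypothetical per-cpu worker */

    extern int my_percpu_thread(void *data);	/* thread function, sketched after the diff */

    static int my_cpu_setup(unsigned int cpu)
    {
    	/*
    	 * Creates the thread on the node of @cpu, marks it per-cpu and
    	 * leaves it parked; kthread_unpark() later binds it to @cpu.
    	 */
    	my_task = kthread_create_on_cpu(my_percpu_thread, NULL, cpu, "mywork/%u");
    	if (IS_ERR(my_task))
    		return PTR_ERR(my_task);
    	kthread_unpark(my_task);
    	return 0;
    }

    static void my_cpu_offline(unsigned int cpu)
    {
    	/* Park instead of kthread_stop(): the task survives the unplug. */
    	kthread_park(my_task);
    }

    static void my_cpu_online(unsigned int cpu)
    {
    	/* Rebinds the per-cpu task to @cpu and wakes it again. */
    	kthread_unpark(my_task);
    }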
Diffstat (limited to 'kernel/kthread.c')
-rw-r--r--	kernel/kthread.c	185
1 file changed, 166 insertions(+), 19 deletions(-)
diff --git a/kernel/kthread.c b/kernel/kthread.c
index b579af57ea10..146a6fa96825 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -37,11 +37,20 @@ struct kthread_create_info
 };
 
 struct kthread {
-	int should_stop;
+	unsigned long flags;
+	unsigned int cpu;
 	void *data;
+	struct completion parked;
 	struct completion exited;
 };
 
+enum KTHREAD_BITS {
+	KTHREAD_IS_PER_CPU = 0,
+	KTHREAD_SHOULD_STOP,
+	KTHREAD_SHOULD_PARK,
+	KTHREAD_IS_PARKED,
+};
+
 #define to_kthread(tsk)	\
 	container_of((tsk)->vfork_done, struct kthread, exited)
 
@@ -52,13 +61,29 @@ struct kthread {
  * and this will return true. You should then return, and your return
  * value will be passed through to kthread_stop().
  */
-int kthread_should_stop(void)
+bool kthread_should_stop(void)
 {
-	return to_kthread(current)->should_stop;
+	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
 }
 EXPORT_SYMBOL(kthread_should_stop);
 
 /**
+ * kthread_should_park - should this kthread park now?
+ *
+ * When someone calls kthread_park() on your kthread, it will be woken
+ * and this will return true. You should then do the necessary
+ * cleanup and call kthread_parkme()
+ *
+ * Similar to kthread_should_stop(), but this keeps the thread alive
+ * and in a park position. kthread_unpark() "restarts" the thread and
+ * calls the thread function again.
+ */
+bool kthread_should_park(void)
+{
+	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
+}
+
+/**
  * kthread_freezable_should_stop - should this freezable kthread return now?
  * @was_frozen: optional out parameter, indicates whether %current was frozen
  *
@@ -96,6 +121,24 @@ void *kthread_data(struct task_struct *task)
 	return to_kthread(task)->data;
 }
 
+static void __kthread_parkme(struct kthread *self)
+{
+	__set_current_state(TASK_INTERRUPTIBLE);
+	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
+		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
+			complete(&self->parked);
+		schedule();
+		__set_current_state(TASK_INTERRUPTIBLE);
+	}
+	clear_bit(KTHREAD_IS_PARKED, &self->flags);
+	__set_current_state(TASK_RUNNING);
+}
+
+void kthread_parkme(void)
+{
+	__kthread_parkme(to_kthread(current));
+}
+
 static int kthread(void *_create)
 {
 	/* Copy data: it's on kthread's stack */
@@ -105,9 +148,10 @@ static int kthread(void *_create)
 	struct kthread self;
 	int ret;
 
-	self.should_stop = 0;
+	self.flags = 0;
 	self.data = data;
 	init_completion(&self.exited);
+	init_completion(&self.parked);
 	current->vfork_done = &self.exited;
 
 	/* OK, tell user we're spawned, wait for stop or wakeup */
@@ -117,9 +161,11 @@ static int kthread(void *_create)
 	schedule();
 
 	ret = -EINTR;
-	if (!self.should_stop)
-		ret = threadfn(data);
 
+	if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
+		__kthread_parkme(&self);
+		ret = threadfn(data);
+	}
 	/* we can't just return, we must preserve "self" on stack */
 	do_exit(ret);
 }
@@ -172,8 +218,7 @@ static void create_kthread(struct kthread_create_info *create)
  * Returns a task_struct or ERR_PTR(-ENOMEM).
  */
 struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
-					   void *data,
-					   int node,
+					   void *data, int node,
 					   const char namefmt[],
 					   ...)
 {
@@ -210,6 +255,13 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 }
 EXPORT_SYMBOL(kthread_create_on_node);
 
+static void __kthread_bind(struct task_struct *p, unsigned int cpu)
+{
+	/* It's safe because the task is inactive. */
+	do_set_cpus_allowed(p, cpumask_of(cpu));
+	p->flags |= PF_THREAD_BOUND;
+}
+
 /**
  * kthread_bind - bind a just-created kthread to a cpu.
  * @p: thread created by kthread_create().
@@ -226,14 +278,112 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
 		WARN_ON(1);
 		return;
 	}
-
-	/* It's safe because the task is inactive. */
-	do_set_cpus_allowed(p, cpumask_of(cpu));
-	p->flags |= PF_THREAD_BOUND;
+	__kthread_bind(p, cpu);
 }
 EXPORT_SYMBOL(kthread_bind);
 
 /**
+ * kthread_create_on_cpu - Create a cpu bound kthread
+ * @threadfn: the function to run until signal_pending(current).
+ * @data: data ptr for @threadfn.
+ * @cpu: The cpu on which the thread should be bound,
+ * @namefmt: printf-style name for the thread. Format is restricted
+ *	     to "name.*%u". Code fills in cpu number.
+ *
+ * Description: This helper function creates and names a kernel thread
+ * The thread will be woken and put into park mode.
+ */
+struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
+					  void *data, unsigned int cpu,
+					  const char *namefmt)
+{
+	struct task_struct *p;
+
+	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
+				   cpu);
+	if (IS_ERR(p))
+		return p;
+	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
+	to_kthread(p)->cpu = cpu;
+	/* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */
+	kthread_park(p);
+	return p;
+}
+
+static struct kthread *task_get_live_kthread(struct task_struct *k)
+{
+	struct kthread *kthread;
+
+	get_task_struct(k);
+	kthread = to_kthread(k);
+	/* It might have exited */
+	barrier();
+	if (k->vfork_done != NULL)
+		return kthread;
+	return NULL;
+}
+
+/**
+ * kthread_unpark - unpark a thread created by kthread_create().
+ * @k: thread created by kthread_create().
+ *
+ * Sets kthread_should_park() for @k to return false, wakes it, and
+ * waits for it to return. If the thread is marked percpu then its
+ * bound to the cpu again.
+ */
+void kthread_unpark(struct task_struct *k)
+{
+	struct kthread *kthread = task_get_live_kthread(k);
+
+	if (kthread) {
+		clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+		/*
+		 * We clear the IS_PARKED bit here as we don't wait
+		 * until the task has left the park code. So if we'd
+		 * park before that happens we'd see the IS_PARKED bit
+		 * which might be about to be cleared.
+		 */
+		if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
+			if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
+				__kthread_bind(k, kthread->cpu);
+			wake_up_process(k);
+		}
+	}
+	put_task_struct(k);
+}
+
+/**
+ * kthread_park - park a thread created by kthread_create().
+ * @k: thread created by kthread_create().
+ *
+ * Sets kthread_should_park() for @k to return true, wakes it, and
+ * waits for it to return. This can also be called after kthread_create()
+ * instead of calling wake_up_process(): the thread will park without
+ * calling threadfn().
+ *
+ * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
+ * If called by the kthread itself just the park bit is set.
+ */
+int kthread_park(struct task_struct *k)
+{
+	struct kthread *kthread = task_get_live_kthread(k);
+	int ret = -ENOSYS;
+
+	if (kthread) {
+		if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
+			set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+			if (k != current) {
+				wake_up_process(k);
+				wait_for_completion(&kthread->parked);
+			}
+		}
+		ret = 0;
+	}
+	put_task_struct(k);
+	return ret;
+}
+
+/**
  * kthread_stop - stop a thread created by kthread_create().
  * @k: thread created by kthread_create().
  *
@@ -250,16 +400,13 @@ EXPORT_SYMBOL(kthread_bind);
  */
 int kthread_stop(struct task_struct *k)
 {
-	struct kthread *kthread;
+	struct kthread *kthread = task_get_live_kthread(k);
 	int ret;
 
 	trace_sched_kthread_stop(k);
-	get_task_struct(k);
-
-	kthread = to_kthread(k);
-	barrier(); /* it might have exited */
-	if (k->vfork_done != NULL) {
-		kthread->should_stop = 1;
+	if (kthread) {
+		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
+		clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
 		wake_up_process(k);
 		wait_for_completion(&kthread->exited);
 	}
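
For completeness, a sketch (again not part of the commit) of what the thread function side of
the park handshake might look like. The worker body and the my_percpu_thread name are
hypothetical; kthread_should_park() and kthread_parkme() are added above, and
kthread_should_stop() is the reworked test_bit() variant.

    /* Hypothetical thread function showing the park handshake added above. */
    #include <linux/kthread.h>
    #include <linux/sched.h>

    int my_percpu_thread(void *data)
    {
    	while (!kthread_should_stop()) {
    		if (kthread_should_park()) {
    			/*
    			 * Sleeps in __kthread_parkme() until kthread_unpark()
    			 * clears KTHREAD_SHOULD_PARK and wakes the task.
    			 */
    			kthread_parkme();
    			continue;
    		}

    		/* ... per-cpu work would go here ... */

    		set_current_state(TASK_INTERRUPTIBLE);
    		if (!kthread_should_stop() && !kthread_should_park())
    			schedule();
    		__set_current_state(TASK_RUNNING);
    	}
    	return 0;
    }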