Diffstat (limited to 'kernel/kthread.c')
-rw-r--r--  kernel/kthread.c | 185
1 file changed, 166 insertions(+), 19 deletions(-)
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 7ba65c1aa6b3..29fb60caecb5 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -38,11 +38,20 @@ struct kthread_create_info
 };
 
 struct kthread {
-        int should_stop;
+        unsigned long flags;
+        unsigned int cpu;
         void *data;
+        struct completion parked;
         struct completion exited;
 };
 
+enum KTHREAD_BITS {
+        KTHREAD_IS_PER_CPU = 0,
+        KTHREAD_SHOULD_STOP,
+        KTHREAD_SHOULD_PARK,
+        KTHREAD_IS_PARKED,
+};
+
 #define to_kthread(tsk) \
         container_of((tsk)->vfork_done, struct kthread, exited)
 
@@ -53,13 +62,29 @@ struct kthread {
  * and this will return true. You should then return, and your return
  * value will be passed through to kthread_stop().
  */
-int kthread_should_stop(void)
+bool kthread_should_stop(void)
 {
-        return to_kthread(current)->should_stop;
+        return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
 }
 EXPORT_SYMBOL(kthread_should_stop);
 
 /**
+ * kthread_should_park - should this kthread park now?
+ *
+ * When someone calls kthread_park() on your kthread, it will be woken
+ * and this will return true. You should then do the necessary
+ * cleanup and call kthread_parkme()
+ *
+ * Similar to kthread_should_stop(), but this keeps the thread alive
+ * and in a park position. kthread_unpark() "restarts" the thread and
+ * calls the thread function again.
+ */
+bool kthread_should_park(void)
+{
+        return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
+}
+
+/**
  * kthread_freezable_should_stop - should this freezable kthread return now?
  * @was_frozen: optional out parameter, indicates whether %current was frozen
  *
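
The two predicates above split the thread's life cycle: kthread_should_stop() still ends the thread function, while kthread_should_park() asks it to quiesce and call kthread_parkme() (added in the next hunk). A minimal sketch, not part of the patch, of a thread function using them; worker_fn() and do_work() are hypothetical names:

/* Sketch only: worker_fn() and do_work() are illustrative names. */
#include <linux/kthread.h>
#include <linux/delay.h>

static void do_work(void *arg)
{
        /* stand-in for the thread's real per-iteration work */
}

int worker_fn(void *arg)
{
        while (!kthread_should_stop()) {
                if (kthread_should_park()) {
                        /* Quiesce, then sleep here until kthread_unpark(). */
                        kthread_parkme();
                        continue;
                }
                do_work(arg);
                msleep(10);     /* placeholder pacing for the sketch */
        }
        return 0;
}

The loop returns only on a stop request; a park request leaves the thread alive and ready to be resumed by kthread_unpark().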
@@ -97,6 +122,24 @@ void *kthread_data(struct task_struct *task)
         return to_kthread(task)->data;
 }
 
+static void __kthread_parkme(struct kthread *self)
+{
+        __set_current_state(TASK_INTERRUPTIBLE);
+        while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
+                if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
+                        complete(&self->parked);
+                schedule();
+                __set_current_state(TASK_INTERRUPTIBLE);
+        }
+        clear_bit(KTHREAD_IS_PARKED, &self->flags);
+        __set_current_state(TASK_RUNNING);
+}
+
+void kthread_parkme(void)
+{
+        __kthread_parkme(to_kthread(current));
+}
+
 static int kthread(void *_create)
 {
         /* Copy data: it's on kthread's stack */
@@ -106,9 +149,10 @@ static int kthread(void *_create)
         struct kthread self;
         int ret;
 
-        self.should_stop = 0;
+        self.flags = 0;
         self.data = data;
         init_completion(&self.exited);
+        init_completion(&self.parked);
         current->vfork_done = &self.exited;
 
         /* OK, tell user we're spawned, wait for stop or wakeup */
@@ -118,9 +162,11 @@ static int kthread(void *_create)
         schedule();
 
         ret = -EINTR;
-        if (!self.should_stop)
-                ret = threadfn(data);
 
+        if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
+                __kthread_parkme(&self);
+                ret = threadfn(data);
+        }
         /* we can't just return, we must preserve "self" on stack */
         do_exit(ret);
 }
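
Because __kthread_parkme() now runs in the startup path before threadfn(), a newly created thread can be parked straight away instead of being woken with wake_up_process(); threadfn() is entered only after kthread_unpark(). A sketch of that controller-side use, assuming the worker_fn() from the previous sketch and hypothetical setup_worker()/start_worker() helpers:

/* Sketch only: setup_worker()/start_worker() are hypothetical names;
 * worker_fn() is the parkable loop from the previous sketch.
 */
#include <linux/kthread.h>
#include <linux/err.h>

int worker_fn(void *arg);

static struct task_struct *worker;

static int setup_worker(void *data)
{
        worker = kthread_create(worker_fn, data, "worker");
        if (IS_ERR(worker))
                return PTR_ERR(worker);

        /* Park instead of wake_up_process(): threadfn() is not entered yet. */
        kthread_park(worker);
        return 0;
}

static void start_worker(void)
{
        /* The thread leaves the park loop and enters worker_fn(). */
        kthread_unpark(worker);
}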
@@ -173,8 +219,7 @@ static void create_kthread(struct kthread_create_info *create)
  * Returns a task_struct or ERR_PTR(-ENOMEM).
  */
 struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
-                                           void *data,
-                                           int node,
+                                           void *data, int node,
                                            const char namefmt[],
                                            ...)
 {
@@ -211,6 +256,13 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 }
 EXPORT_SYMBOL(kthread_create_on_node);
 
+static void __kthread_bind(struct task_struct *p, unsigned int cpu)
+{
+        /* It's safe because the task is inactive. */
+        do_set_cpus_allowed(p, cpumask_of(cpu));
+        p->flags |= PF_THREAD_BOUND;
+}
+
 /**
  * kthread_bind - bind a just-created kthread to a cpu.
  * @p: thread created by kthread_create().
@@ -227,14 +279,112 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
                 WARN_ON(1);
                 return;
         }
-
-        /* It's safe because the task is inactive. */
-        do_set_cpus_allowed(p, cpumask_of(cpu));
-        p->flags |= PF_THREAD_BOUND;
+        __kthread_bind(p, cpu);
 }
 EXPORT_SYMBOL(kthread_bind);
 
 /**
+ * kthread_create_on_cpu - Create a cpu bound kthread
+ * @threadfn: the function to run until signal_pending(current).
+ * @data: data ptr for @threadfn.
+ * @cpu: The cpu on which the thread should be bound,
+ * @namefmt: printf-style name for the thread. Format is restricted
+ *           to "name.*%u". Code fills in cpu number.
+ *
+ * Description: This helper function creates and names a kernel thread
+ * The thread will be woken and put into park mode.
+ */
+struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
+                                          void *data, unsigned int cpu,
+                                          const char *namefmt)
+{
+        struct task_struct *p;
+
+        p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
+                                   cpu);
+        if (IS_ERR(p))
+                return p;
+        set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
+        to_kthread(p)->cpu = cpu;
+        /* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */
+        kthread_park(p);
+        return p;
+}
+
+static struct kthread *task_get_live_kthread(struct task_struct *k)
+{
+        struct kthread *kthread;
+
+        get_task_struct(k);
+        kthread = to_kthread(k);
+        /* It might have exited */
+        barrier();
+        if (k->vfork_done != NULL)
+                return kthread;
+        return NULL;
+}
+
+/**
+ * kthread_unpark - unpark a thread created by kthread_create().
+ * @k: thread created by kthread_create().
+ *
+ * Sets kthread_should_park() for @k to return false, wakes it, and
+ * waits for it to return. If the thread is marked percpu then its
+ * bound to the cpu again.
+ */
+void kthread_unpark(struct task_struct *k)
+{
+        struct kthread *kthread = task_get_live_kthread(k);
+
+        if (kthread) {
+                clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+                /*
+                 * We clear the IS_PARKED bit here as we don't wait
+                 * until the task has left the park code. So if we'd
+                 * park before that happens we'd see the IS_PARKED bit
+                 * which might be about to be cleared.
+                 */
+                if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
+                        if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
+                                __kthread_bind(k, kthread->cpu);
+                        wake_up_process(k);
+                }
+        }
+        put_task_struct(k);
+}
+
+/**
+ * kthread_park - park a thread created by kthread_create().
+ * @k: thread created by kthread_create().
+ *
+ * Sets kthread_should_park() for @k to return true, wakes it, and
+ * waits for it to return. This can also be called after kthread_create()
+ * instead of calling wake_up_process(): the thread will park without
+ * calling threadfn().
+ *
+ * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
+ * If called by the kthread itself just the park bit is set.
+ */
+int kthread_park(struct task_struct *k)
+{
+        struct kthread *kthread = task_get_live_kthread(k);
+        int ret = -ENOSYS;
+
+        if (kthread) {
+                if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
+                        set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+                        if (k != current) {
+                                wake_up_process(k);
+                                wait_for_completion(&kthread->parked);
+                        }
+                }
+                ret = 0;
+        }
+        put_task_struct(k);
+        return ret;
+}
+
+/**
  * kthread_stop - stop a thread created by kthread_create().
  * @k: thread created by kthread_create().
  *
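
Together, kthread_create_on_cpu(), kthread_park() and kthread_unpark() target per-cpu threads that must survive CPU hotplug: the thread is created bound and parked, unparked when its CPU comes online (which re-binds it via KTHREAD_IS_PER_CPU), and parked again before the CPU goes down, rather than being destroyed and recreated. A sketch of that pattern, with the hotplug callbacks and per_cpu_worker[] as illustrative glue that is not part of this patch:

/* Sketch only: the callbacks and per_cpu_worker[] are illustrative glue;
 * worker_fn() is the parkable loop from the first sketch.
 */
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/threads.h>

int worker_fn(void *arg);

static struct task_struct *per_cpu_worker[NR_CPUS];

static int worker_prepare_cpu(unsigned int cpu)
{
        struct task_struct *p;

        /* Returned thread is already bound to @cpu and parked. */
        p = kthread_create_on_cpu(worker_fn, NULL, cpu, "worker/%u");
        if (IS_ERR(p))
                return PTR_ERR(p);
        per_cpu_worker[cpu] = p;
        return 0;
}

static int worker_cpu_online(unsigned int cpu)
{
        /* Re-binds the thread to @cpu (KTHREAD_IS_PER_CPU) and wakes it. */
        kthread_unpark(per_cpu_worker[cpu]);
        return 0;
}

static int worker_cpu_down_prep(unsigned int cpu)
{
        /* The thread quiesces in kthread_parkme() until the CPU returns. */
        kthread_park(per_cpu_worker[cpu]);
        return 0;
}

How these callbacks are wired into the hotplug machinery is left out here; the point is that park/unpark replaces the stop/re-create cycle across an offline/online transition.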
@@ -251,16 +401,13 @@ EXPORT_SYMBOL(kthread_bind);
  */
 int kthread_stop(struct task_struct *k)
 {
-        struct kthread *kthread;
+        struct kthread *kthread = task_get_live_kthread(k);
         int ret;
 
         trace_sched_kthread_stop(k);
-        get_task_struct(k);
-
-        kthread = to_kthread(k);
-        barrier(); /* it might have exited */
-        if (k->vfork_done != NULL) {
-                kthread->should_stop = 1;
+        if (kthread) {
+                set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
+                clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
                 wake_up_process(k);
                 wait_for_completion(&kthread->exited);
         }
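
A note on teardown: kthread_stop() now clears KTHREAD_SHOULD_PARK before waking the task, so it can be called on a parked thread as well; the thread drops out of the park loop, a thread function that checks kthread_should_stop() returns, and kthread_stop() waits on the exited completion as before. A minimal sketch, reusing the hypothetical names above:

/* Sketch only: teardown_worker() is a hypothetical helper. */
#include <linux/kthread.h>

static void teardown_worker(struct task_struct *worker)
{
        /* Works whether the thread is running, sleeping or parked. */
        kthread_stop(worker);
}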