Diffstat (limited to 'kernel/kthread.c')
 kernel/kthread.c | 65 ++++++++++++++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 42 insertions(+), 23 deletions(-)
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 9eb7fed0bbaa..760e86df8c20 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -17,6 +17,7 @@
 #include <linux/slab.h>
 #include <linux/freezer.h>
 #include <linux/ptrace.h>
+#include <linux/uaccess.h>
 #include <trace/events/sched.h>
 
 static DEFINE_SPINLOCK(kthread_create_lock);
@@ -52,8 +53,21 @@ enum KTHREAD_BITS {
 	KTHREAD_IS_PARKED,
 };
 
-#define to_kthread(tsk)	\
-	container_of((tsk)->vfork_done, struct kthread, exited)
+#define __to_kthread(vfork)	\
+	container_of(vfork, struct kthread, exited)
+
+static inline struct kthread *to_kthread(struct task_struct *k)
+{
+	return __to_kthread(k->vfork_done);
+}
+
+static struct kthread *to_live_kthread(struct task_struct *k)
+{
+	struct completion *vfork = ACCESS_ONCE(k->vfork_done);
+	if (likely(vfork))
+		return __to_kthread(vfork);
+	return NULL;
+}
 
 /**
  * kthread_should_stop - should this kthread return now?
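
Note: a kthread's struct kthread lives on the thread's own stack and is reachable only through ->vfork_done, which is cleared as the thread exits. The single ACCESS_ONCE() load in to_live_kthread() makes the NULL check and the container_of() use the same snapshot of that pointer. An illustrative sketch of the double-read hazard it avoids (not part of the patch):

	/* Double read: ->vfork_done can become NULL between the check
	 * and the use, so __to_kthread() would compute a bogus address.
	 */
	if (k->vfork_done)					/* load #1 */
		kthread = __to_kthread(k->vfork_done);		/* load #2 */

	/* Single read, as in to_live_kthread(): check and use operate
	 * on one snapshot; the compiler cannot re-load the pointer.
	 */
	struct completion *vfork = ACCESS_ONCE(k->vfork_done);
	if (vfork)
		kthread = __to_kthread(vfork);
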
@@ -122,6 +136,24 @@ void *kthread_data(struct task_struct *task)
 	return to_kthread(task)->data;
 }
 
+/**
+ * probe_kthread_data - speculative version of kthread_data()
+ * @task: possible kthread task in question
+ *
+ * @task could be a kthread task. Return the data value specified when it
+ * was created if accessible. If @task isn't a kthread task or its data is
+ * inaccessible for any reason, %NULL is returned. This function requires
+ * that @task itself is safe to dereference.
+ */
+void *probe_kthread_data(struct task_struct *task)
+{
+	struct kthread *kthread = to_kthread(task);
+	void *data = NULL;
+
+	probe_kernel_read(&data, &kthread->data, sizeof(data));
+	return data;
+}
+
 static void __kthread_parkme(struct kthread *self)
 {
 	__set_current_state(TASK_PARKED);
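
Note: probe_kthread_data() relies on probe_kernel_read(), which copies with page faults disabled and returns an error instead of oopsing, so a stale or bogus ->vfork_done only yields %NULL. A hedged usage sketch (dump_possible_kthread() is an illustrative name, not from this patch):

	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/printk.h>

	static void dump_possible_kthread(struct task_struct *task)
	{
		/* Safe even if @task is a userspace process: a fault
		 * inside probe_kthread_data() is absorbed and NULL
		 * comes back.
		 */
		void *data = probe_kthread_data(task);

		if (data)
			pr_info("%s/%d looks like a kthread, data=%p\n",
				task->comm, task->pid, data);
	}
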
@@ -265,7 +297,7 @@ static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
 	}
 	/* It's safe because the task is inactive. */
 	do_set_cpus_allowed(p, cpumask_of(cpu));
-	p->flags |= PF_THREAD_BOUND;
+	p->flags |= PF_NO_SETAFFINITY;
 }
 
 /**
@@ -311,19 +343,6 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
 	return p;
 }
 
-static struct kthread *task_get_live_kthread(struct task_struct *k)
-{
-	struct kthread *kthread;
-
-	get_task_struct(k);
-	kthread = to_kthread(k);
-	/* It might have exited */
-	barrier();
-	if (k->vfork_done != NULL)
-		return kthread;
-	return NULL;
-}
-
 static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
 {
 	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
@@ -350,11 +369,10 @@ static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
  */
 void kthread_unpark(struct task_struct *k)
 {
-	struct kthread *kthread = task_get_live_kthread(k);
+	struct kthread *kthread = to_live_kthread(k);
 
 	if (kthread)
 		__kthread_unpark(k, kthread);
-	put_task_struct(k);
 }
 
 /**
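
Note: for context, kthread_park()/kthread_unpark() pair with the parkme helpers on the thread side. A rough sketch of the intended usage (my_thread_fn and quiesce_and_resume are illustrative names, not from this patch):

	#include <linux/kthread.h>

	static int my_thread_fn(void *data)
	{
		while (!kthread_should_stop()) {
			if (kthread_should_park())
				kthread_parkme();	/* sleep in TASK_PARKED until unparked */
			/* ... do one unit of work ... */
		}
		return 0;
	}

	/* Controller side, e.g. around CPU hotplug: */
	static void quiesce_and_resume(struct task_struct *t)
	{
		kthread_park(t);	/* returns once the thread is parked */
		/* ... the thread is guaranteed quiescent here ... */
		kthread_unpark(t);	/* let it run again */
	}
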
@@ -371,7 +389,7 @@ void kthread_unpark(struct task_struct *k)
  */
 int kthread_park(struct task_struct *k)
 {
-	struct kthread *kthread = task_get_live_kthread(k);
+	struct kthread *kthread = to_live_kthread(k);
 	int ret = -ENOSYS;
 
 	if (kthread) {
@@ -384,7 +402,6 @@ int kthread_park(struct task_struct *k)
 		}
 		ret = 0;
 	}
-	put_task_struct(k);
 	return ret;
 }
 
@@ -405,10 +422,13 @@ int kthread_park(struct task_struct *k)
  */
 int kthread_stop(struct task_struct *k)
 {
-	struct kthread *kthread = task_get_live_kthread(k);
+	struct kthread *kthread;
 	int ret;
 
 	trace_sched_kthread_stop(k);
+
+	get_task_struct(k);
+	kthread = to_live_kthread(k);
 	if (kthread) {
 		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
 		__kthread_unpark(k, kthread);
@@ -416,10 +436,9 @@ int kthread_stop(struct task_struct *k)
 		wait_for_completion(&kthread->exited);
 	}
 	ret = k->exit_code;
-
 	put_task_struct(k);
-	trace_sched_kthread_stop_ret(ret);
 
+	trace_sched_kthread_stop_ret(ret);
 	return ret;
 }
 EXPORT_SYMBOL(kthread_stop);
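
Note: the net effect of the kthread_stop() hunks is that the task_struct is pinned with get_task_struct() before ->vfork_done is sampled via to_live_kthread(), and released only after wait_for_completion(), so a caller that created the thread can stop it without taking an extra reference even though the thread may be exiting concurrently. A hedged end-to-end sketch (my_worker, my_start, my_stop are illustrative names):

	#include <linux/kthread.h>
	#include <linux/err.h>

	static struct task_struct *my_worker;

	static int my_start(void)
	{
		my_worker = kthread_run(my_thread_fn, NULL, "my-worker");
		if (IS_ERR(my_worker))
			return PTR_ERR(my_worker);
		return 0;
	}

	static void my_stop(void)
	{
		/* kthread_stop() takes its own task_struct reference
		 * before inspecting ->vfork_done, and waits for the
		 * thread to exit before returning its exit code.
		 */
		kthread_stop(my_worker);
	}
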