diff options
| author | Ingo Molnar <mingo@elte.hu> | 2009-09-07 02:19:51 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-09-07 02:19:51 -0400 |
| commit | a1922ed661ab2c1637d0b10cde933bd9cd33d965 (patch) | |
| tree | 0f1777542b385ebefd30b3586d830fd8ed6fda5b /kernel/kthread.c | |
| parent | 75e33751ca8bbb72dd6f1a74d2810ddc8cbe4bdf (diff) | |
| parent | d28daf923ac5e4a0d7cecebae56f3e339189366b (diff) | |
Merge branch 'tracing/core' into tracing/hw-breakpoints
Conflicts:
arch/Kconfig
kernel/trace/trace.h
Merge reason: resolve the conflicts, plus adapt to the new
ring-buffer APIs.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/kthread.c')
| -rw-r--r-- | kernel/kthread.c | 90 |
1 file changed, 38 insertions, 52 deletions
diff --git a/kernel/kthread.c b/kernel/kthread.c index 7fa441333529..eb8751aa0418 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c | |||
| @@ -27,7 +27,6 @@ struct kthread_create_info | |||
| 27 | /* Information passed to kthread() from kthreadd. */ | 27 | /* Information passed to kthread() from kthreadd. */ |
| 28 | int (*threadfn)(void *data); | 28 | int (*threadfn)(void *data); |
| 29 | void *data; | 29 | void *data; |
| 30 | struct completion started; | ||
| 31 | 30 | ||
| 32 | /* Result passed back to kthread_create() from kthreadd. */ | 31 | /* Result passed back to kthread_create() from kthreadd. */ |
| 33 | struct task_struct *result; | 32 | struct task_struct *result; |
| @@ -36,17 +35,13 @@ struct kthread_create_info | |||
| 36 | struct list_head list; | 35 | struct list_head list; |
| 37 | }; | 36 | }; |
| 38 | 37 | ||
| 39 | struct kthread_stop_info | 38 | struct kthread { |
| 40 | { | 39 | int should_stop; |
| 41 | struct task_struct *k; | 40 | struct completion exited; |
| 42 | int err; | ||
| 43 | struct completion done; | ||
| 44 | }; | 41 | }; |
| 45 | 42 | ||
| 46 | /* Thread stopping is done by setthing this var: lock serializes | 43 | #define to_kthread(tsk) \ |
| 47 | * multiple kthread_stop calls. */ | 44 | container_of((tsk)->vfork_done, struct kthread, exited) |
| 48 | static DEFINE_MUTEX(kthread_stop_lock); | ||
| 49 | static struct kthread_stop_info kthread_stop_info; | ||
| 50 | 45 | ||
| 51 | /** | 46 | /** |
| 52 | * kthread_should_stop - should this kthread return now? | 47 | * kthread_should_stop - should this kthread return now? |
| @@ -57,36 +52,35 @@ static struct kthread_stop_info kthread_stop_info; | |||
| 57 | */ | 52 | */ |
| 58 | int kthread_should_stop(void) | 53 | int kthread_should_stop(void) |
| 59 | { | 54 | { |
| 60 | return (kthread_stop_info.k == current); | 55 | return to_kthread(current)->should_stop; |
| 61 | } | 56 | } |
| 62 | EXPORT_SYMBOL(kthread_should_stop); | 57 | EXPORT_SYMBOL(kthread_should_stop); |
| 63 | 58 | ||
| 64 | static int kthread(void *_create) | 59 | static int kthread(void *_create) |
| 65 | { | 60 | { |
| 61 | /* Copy data: it's on kthread's stack */ | ||
| 66 | struct kthread_create_info *create = _create; | 62 | struct kthread_create_info *create = _create; |
| 67 | int (*threadfn)(void *data); | 63 | int (*threadfn)(void *data) = create->threadfn; |
| 68 | void *data; | 64 | void *data = create->data; |
| 69 | int ret = -EINTR; | 65 | struct kthread self; |
| 66 | int ret; | ||
| 70 | 67 | ||
| 71 | /* Copy data: it's on kthread's stack */ | 68 | self.should_stop = 0; |
| 72 | threadfn = create->threadfn; | 69 | init_completion(&self.exited); |
| 73 | data = create->data; | 70 | current->vfork_done = &self.exited; |
| 74 | 71 | ||
| 75 | /* OK, tell user we're spawned, wait for stop or wakeup */ | 72 | /* OK, tell user we're spawned, wait for stop or wakeup */ |
| 76 | __set_current_state(TASK_UNINTERRUPTIBLE); | 73 | __set_current_state(TASK_UNINTERRUPTIBLE); |
| 77 | create->result = current; | 74 | create->result = current; |
| 78 | complete(&create->started); | 75 | complete(&create->done); |
| 79 | schedule(); | 76 | schedule(); |
| 80 | 77 | ||
| 81 | if (!kthread_should_stop()) | 78 | ret = -EINTR; |
| 79 | if (!self.should_stop) | ||
| 82 | ret = threadfn(data); | 80 | ret = threadfn(data); |
| 83 | 81 | ||
| 84 | /* It might have exited on its own, w/o kthread_stop. Check. */ | 82 | /* we can't just return, we must preserve "self" on stack */ |
| 85 | if (kthread_should_stop()) { | 83 | do_exit(ret); |
| 86 | kthread_stop_info.err = ret; | ||
| 87 | complete(&kthread_stop_info.done); | ||
| 88 | } | ||
| 89 | return 0; | ||
| 90 | } | 84 | } |
| 91 | 85 | ||
| 92 | static void create_kthread(struct kthread_create_info *create) | 86 | static void create_kthread(struct kthread_create_info *create) |
| @@ -95,11 +89,10 @@ static void create_kthread(struct kthread_create_info *create) | |||
| 95 | 89 | ||
| 96 | /* We want our own signal handler (we take no signals by default). */ | 90 | /* We want our own signal handler (we take no signals by default). */ |
| 97 | pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD); | 91 | pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD); |
| 98 | if (pid < 0) | 92 | if (pid < 0) { |
| 99 | create->result = ERR_PTR(pid); | 93 | create->result = ERR_PTR(pid); |
| 100 | else | 94 | complete(&create->done); |
| 101 | wait_for_completion(&create->started); | 95 | } |
| 102 | complete(&create->done); | ||
| 103 | } | 96 | } |
| 104 | 97 | ||
| 105 | /** | 98 | /** |
| @@ -130,7 +123,6 @@ struct task_struct *kthread_create(int (*threadfn)(void *data), | |||
| 130 | 123 | ||
| 131 | create.threadfn = threadfn; | 124 | create.threadfn = threadfn; |
| 132 | create.data = data; | 125 | create.data = data; |
| 133 | init_completion(&create.started); | ||
| 134 | init_completion(&create.done); | 126 | init_completion(&create.done); |
| 135 | 127 | ||
| 136 | spin_lock(&kthread_create_lock); | 128 | spin_lock(&kthread_create_lock); |
| @@ -188,40 +180,34 @@ EXPORT_SYMBOL(kthread_bind); | |||
| 188 | * @k: thread created by kthread_create(). | 180 | * @k: thread created by kthread_create(). |
| 189 | * | 181 | * |
| 190 | * Sets kthread_should_stop() for @k to return true, wakes it, and | 182 | * Sets kthread_should_stop() for @k to return true, wakes it, and |
| 191 | * waits for it to exit. Your threadfn() must not call do_exit() | 183 | * waits for it to exit. This can also be called after kthread_create() |
| 192 | * itself if you use this function! This can also be called after | 184 | * instead of calling wake_up_process(): the thread will exit without |
| 193 | * kthread_create() instead of calling wake_up_process(): the thread | 185 | * calling threadfn(). |
| 194 | * will exit without calling threadfn(). | 186 | * |
| 187 | * If threadfn() may call do_exit() itself, the caller must ensure | ||
| 188 | * task_struct can't go away. | ||
| 195 | * | 189 | * |
| 196 | * Returns the result of threadfn(), or %-EINTR if wake_up_process() | 190 | * Returns the result of threadfn(), or %-EINTR if wake_up_process() |
| 197 | * was never called. | 191 | * was never called. |
| 198 | */ | 192 | */ |
| 199 | int kthread_stop(struct task_struct *k) | 193 | int kthread_stop(struct task_struct *k) |
| 200 | { | 194 | { |
| 195 | struct kthread *kthread; | ||
| 201 | int ret; | 196 | int ret; |
| 202 | 197 | ||
| 203 | mutex_lock(&kthread_stop_lock); | ||
| 204 | |||
| 205 | /* It could exit after stop_info.k set, but before wake_up_process. */ | ||
| 206 | get_task_struct(k); | ||
| 207 | |||
| 208 | trace_sched_kthread_stop(k); | 198 | trace_sched_kthread_stop(k); |
| 199 | get_task_struct(k); | ||
| 209 | 200 | ||
| 210 | /* Must init completion *before* thread sees kthread_stop_info.k */ | 201 | kthread = to_kthread(k); |
| 211 | init_completion(&kthread_stop_info.done); | 202 | barrier(); /* it might have exited */ |
| 212 | smp_wmb(); | 203 | if (k->vfork_done != NULL) { |
| 204 | kthread->should_stop = 1; | ||
| 205 | wake_up_process(k); | ||
| 206 | wait_for_completion(&kthread->exited); | ||
| 207 | } | ||
| 208 | ret = k->exit_code; | ||
| 213 | 209 | ||
| 214 | /* Now set kthread_should_stop() to true, and wake it up. */ | ||
| 215 | kthread_stop_info.k = k; | ||
| 216 | wake_up_process(k); | ||
| 217 | put_task_struct(k); | 210 | put_task_struct(k); |
| 218 | |||
| 219 | /* Once it dies, reset stop ptr, gather result and we're done. */ | ||
| 220 | wait_for_completion(&kthread_stop_info.done); | ||
| 221 | kthread_stop_info.k = NULL; | ||
| 222 | ret = kthread_stop_info.err; | ||
| 223 | mutex_unlock(&kthread_stop_lock); | ||
| 224 | |||
| 225 | trace_sched_kthread_stop_ret(ret); | 211 | trace_sched_kthread_stop_ret(ret); |
| 226 | 212 | ||
| 227 | return ret; | 213 | return ret; |
