 include/linux/kthread.h |  1 +
 kernel/fork.c           |  2 ++
 kernel/kthread.c        | 58 ++++++++++++++++++++++++++++++++++++-------------
 3 files changed, 48 insertions(+), 13 deletions(-)
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index a6e82a69c363..c1c3e63d52c1 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -48,6 +48,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
 	__k;								   \
 })
 
+void free_kthread_struct(struct task_struct *k);
 void kthread_bind(struct task_struct *k, unsigned int cpu);
 void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
 int kthread_stop(struct task_struct *k);
diff --git a/kernel/fork.c b/kernel/fork.c
index 600e93b5e539..7ffa16033ded 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -354,6 +354,8 @@ void free_task(struct task_struct *tsk)
 	ftrace_graph_exit_task(tsk);
 	put_seccomp_filter(tsk);
 	arch_release_task_struct(tsk);
+	if (tsk->flags & PF_KTHREAD)
+		free_kthread_struct(tsk);
 	free_task_struct(tsk);
 }
 EXPORT_SYMBOL(free_task);
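
The PF_KTHREAD guard above is what makes the field reuse in kernel/kthread.c safe: for ordinary user tasks, ->set_child_tid holds a user-space address (the CLONE_CHILD_SETTID target), which must never be handed to kfree(). A minimal user-space sketch of the same guarded-free pattern, with hypothetical names throughout:

/*
 * User-space sketch (hypothetical names): one pointer field means
 * different things for different task kinds, so free it only for the
 * kind that actually owns a heap allocation.
 */
#include <stdlib.h>

struct task {
	int is_kthread;         /* stands in for PF_KTHREAD */
	void *tid_or_kthread;   /* user address for user tasks, heap ptr for kthreads */
};

static void free_task(struct task *t)
{
	if (t->is_kthread)
		free(t->tid_or_kthread);   /* free_kthread_struct() analogue */
	free(t);
}

int main(void)
{
	struct task *t = calloc(1, sizeof(*t));

	t->is_kthread = 1;
	t->tid_or_kthread = malloc(32);    /* the kthread's side struct */
	free_task(t);
	return 0;
}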
diff --git a/kernel/kthread.c b/kernel/kthread.c
index be2cc1f9dd57..9d64b6526d0b 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -53,14 +53,38 @@ enum KTHREAD_BITS {
 	KTHREAD_IS_PARKED,
 };
 
-#define __to_kthread(vfork)	\
-	container_of(vfork, struct kthread, exited)
+static inline void set_kthread_struct(void *kthread)
+{
+	/*
+	 * We abuse ->set_child_tid to avoid the new member and because it
+	 * can't be wrongly copied by copy_process(). We also rely on the
+	 * fact that the caller can't exec, so PF_KTHREAD can't be cleared.
+	 */
+	current->set_child_tid = (__force void __user *)kthread;
+}
 
 static inline struct kthread *to_kthread(struct task_struct *k)
 {
-	return __to_kthread(k->vfork_done);
+	WARN_ON(!(k->flags & PF_KTHREAD));
+	return (__force void *)k->set_child_tid;
+}
+
+void free_kthread_struct(struct task_struct *k)
+{
+	/*
+	 * Can be NULL if this kthread was created by kernel_thread()
+	 * or if kmalloc() in kthread() failed.
+	 */
+	kfree(to_kthread(k));
 }
 
+#define __to_kthread(vfork)	\
+	container_of(vfork, struct kthread, exited)
+
+/*
+ * TODO: kill it and use to_kthread(). But we still need the users
+ * like kthread_stop() which has to sync with the exiting kthread.
+ */
 static struct kthread *to_live_kthread(struct task_struct *k)
 {
 	struct completion *vfork = ACCESS_ONCE(k->vfork_done);
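
The hunk above replaces the container_of() lookup through ->vfork_done with a direct pointer stash: set_kthread_struct() parks the kmalloc'ed struct kthread in the otherwise-unused ->set_child_tid field, and to_kthread() reads it back with a cast. A self-contained user-space sketch of this stash-and-cast pattern, with hypothetical names:

/*
 * User-space sketch (hypothetical names) of the pointer-stash pattern:
 * a per-task side structure is parked in an unused pointer field and
 * read back with a cast, guarded by a kind check like the WARN_ON().
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct kthread_state { unsigned long flags; };

struct task {
	int is_kthread;     /* stands in for PF_KTHREAD */
	void *stash;        /* stands in for ->set_child_tid */
};

static void set_side_struct(struct task *t, void *p)
{
	t->stash = p;                       /* set_kthread_struct() analogue */
}

static struct kthread_state *to_side_struct(struct task *t)
{
	assert(t->is_kthread);              /* WARN_ON() analogue */
	return t->stash;                    /* to_kthread() analogue */
}

int main(void)
{
	struct task t = { .is_kthread = 1 };
	struct kthread_state *s = malloc(sizeof(*s));

	s->flags = 0;
	set_side_struct(&t, s);
	printf("flags=%lu\n", to_side_struct(&t)->flags);
	free(s);                            /* free_kthread_struct() analogue */
	return 0;
}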
@@ -181,14 +205,11 @@ static int kthread(void *_create)
 	int (*threadfn)(void *data) = create->threadfn;
 	void *data = create->data;
 	struct completion *done;
-	struct kthread self;
+	struct kthread *self;
 	int ret;
 
-	self.flags = 0;
-	self.data = data;
-	init_completion(&self.exited);
-	init_completion(&self.parked);
-	current->vfork_done = &self.exited;
+	self = kmalloc(sizeof(*self), GFP_KERNEL);
+	set_kthread_struct(self);
 
 	/* If user was SIGKILLed, I release the structure. */
 	done = xchg(&create->done, NULL);
@@ -196,6 +217,19 @@ static int kthread(void *_create)
 		kfree(create);
 		do_exit(-EINTR);
 	}
+
+	if (!self) {
+		create->result = ERR_PTR(-ENOMEM);
+		complete(done);
+		do_exit(-ENOMEM);
+	}
+
+	self->flags = 0;
+	self->data = data;
+	init_completion(&self->exited);
+	init_completion(&self->parked);
+	current->vfork_done = &self->exited;
+
 	/* OK, tell user we're spawned, wait for stop or wakeup */
 	__set_current_state(TASK_UNINTERRUPTIBLE);
 	create->result = current;
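
Note the ordering in this hunk: the allocation-failure check deliberately comes after the xchg() on create->done. If the creator was SIGKILLed there is nobody left to report -ENOMEM to, so the thread simply exits; only a successful claim of the completion makes it meaningful to publish ERR_PTR(-ENOMEM) and fire complete(). A rough user-space sketch of that claim-then-report handshake, with hypothetical names:

/*
 * User-space sketch (hypothetical names) of the claim-then-report
 * ordering: first atomically claim the completion token; only a
 * successful claim makes it meaningful to report an allocation error.
 */
#include <stdatomic.h>
#include <stdio.h>

struct create_req {
	_Atomic(int) pending;   /* 1 while the creator still waits */
	int result;             /* 0 = ok, negative = errno-style failure */
};

static int spawned_side(struct create_req *req, void *self)
{
	/* xchg(&create->done, NULL) analogue: claim the right to answer */
	if (!atomic_exchange(&req->pending, 0))
		return -4;              /* creator gone (SIGKILL): just exit, like -EINTR */

	if (!self) {
		req->result = -12;      /* ERR_PTR(-ENOMEM) analogue */
		/* complete(done) would wake the creator here */
		return -12;
	}
	req->result = 0;
	return 0;
}

int main(void)
{
	struct create_req req = { 1, 0 };

	printf("ret=%d result=%d\n", spawned_side(&req, NULL), req.result);
	return 0;
}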
@@ -203,12 +237,10 @@ static int kthread(void *_create)
 	schedule();
 
 	ret = -EINTR;
-
-	if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
-		__kthread_parkme(&self);
+	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
+		__kthread_parkme(self);
 		ret = threadfn(data);
 	}
-	/* we can't just return, we must preserve "self" on stack */
 	do_exit(ret);
 }
 
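The comment deleted in the last hunk marked the old constraint: with self on the kthread's stack, kthread() could never simply return, because other code held a pointer into that stack frame via ->vfork_done. With self now kmalloc'ed and freed only from free_task(), the state outlives the thread itself. A user-space analogue of the new lifetime, again with hypothetical names:

/*
 * User-space sketch (hypothetical names): the worker publishes a
 * heap-allocated state struct; the reaping side frees it last, so the
 * state stays readable even after the worker has exited.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct kstate { int exit_code; };

static void *worker(void *arg)
{
	struct kstate *self = arg;      /* heap, not the worker's stack */

	self->exit_code = 0;
	return NULL;                    /* the thread may now go away */
}

int main(void)
{
	struct kstate *st = malloc(sizeof(*st));
	pthread_t t;

	pthread_create(&t, NULL, worker, st);
	pthread_join(t, NULL);                    /* worker is gone...    */
	printf("exit_code=%d\n", st->exit_code);  /* ...its state is not  */
	free(st);                                 /* free_task() analogue */
	return 0;
}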