author		Oleg Nesterov <oleg@redhat.com>		2016-11-29 12:50:57 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2016-12-08 08:36:18 -0500
commit		1da5c46fa965ff90f5ffc080b6ab3fae5e227bc3
tree		907dd44b43d3cf5f49be920c46a5592364bcb1e8
parent		7c4788950ba5922fde976d80b72baf46f14dee8d
kthread: Make struct kthread kmalloc'ed
commit 23196f2e5f5d ("kthread: Pin the stack via try_get_task_stack() /
put_task_stack() in to_live_kthread() function") is a workaround for the
fragile design of struct kthread being allocated on the task stack.

struct kthread in its current form should be removed, but this needs
cleanups outside of kthread.c.

As a first step, move struct kthread away from the task stack by making it
kmalloc'ed. This allows accessing kthread.exited without the magic of
trying to pin the task stack and the try logic in to_live_kthread().

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Chunming Zhou <David1.Zhou@amd.com>
Cc: Roman Pen <roman.penyaev@profitbricks.com>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Tejun Heo <tj@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20161129175057.GA5330@redhat.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
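For context before reading the diff: pre-patch, struct kthread was a local
variable inside the kthread() thread function, so it was only valid while
that function's stack frame (i.e. the task stack) stayed alive. Outside
users reached it through task->vfork_done, which is why to_live_kthread()
had to pin the stack and why kthread() could never simply return. A
condensed sketch of the pre-patch shape, reconstructed from the removed
lines in the diff below (the creator handshake and SIGKILL handling are
elided):

static int kthread(void *_create)
{
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct kthread self;		/* lives on this kthread's stack */
	int ret;

	self.flags = 0;
	self.data = data;
	init_completion(&self.exited);
	init_completion(&self.parked);
	current->vfork_done = &self.exited;	/* outside code finds "self" via this */

	/* ... signal the creator and sleep until first wakeup (elided) ... */

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
		__kthread_parkme(&self);
		ret = threadfn(data);
	}
	/* we can't just return, we must preserve "self" on stack */
	do_exit(ret);
}

The patch replaces the stack local with a kmalloc'ed object stashed in
->set_child_tid, freed from free_task().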
-rw-r--r--	include/linux/kthread.h	 1
-rw-r--r--	kernel/fork.c		 2
-rw-r--r--	kernel/kthread.c	58
3 files changed, 48 insertions(+), 13 deletions(-)
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index a6e82a69c363..c1c3e63d52c1 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -48,6 +48,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
 	__k;								\
 })
 
+void free_kthread_struct(struct task_struct *k);
 void kthread_bind(struct task_struct *k, unsigned int cpu);
 void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
 int kthread_stop(struct task_struct *k);
diff --git a/kernel/fork.c b/kernel/fork.c
index 600e93b5e539..7ffa16033ded 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -354,6 +354,8 @@ void free_task(struct task_struct *tsk)
 	ftrace_graph_exit_task(tsk);
 	put_seccomp_filter(tsk);
 	arch_release_task_struct(tsk);
+	if (tsk->flags & PF_KTHREAD)
+		free_kthread_struct(tsk);
 	free_task_struct(tsk);
 }
 EXPORT_SYMBOL(free_task);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index be2cc1f9dd57..9d64b6526d0b 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -53,14 +53,38 @@ enum KTHREAD_BITS {
 	KTHREAD_IS_PARKED,
 };
 
-#define __to_kthread(vfork)	\
-	container_of(vfork, struct kthread, exited)
+static inline void set_kthread_struct(void *kthread)
+{
+	/*
+	 * We abuse ->set_child_tid to avoid the new member and because it
+	 * can't be wrongly copied by copy_process(). We also rely on fact
+	 * that the caller can't exec, so PF_KTHREAD can't be cleared.
+	 */
+	current->set_child_tid = (__force void __user *)kthread;
+}
 
 static inline struct kthread *to_kthread(struct task_struct *k)
 {
-	return __to_kthread(k->vfork_done);
+	WARN_ON(!(k->flags & PF_KTHREAD));
+	return (__force void *)k->set_child_tid;
+}
+
+void free_kthread_struct(struct task_struct *k)
+{
+	/*
+	 * Can be NULL if this kthread was created by kernel_thread()
+	 * or if kmalloc() in kthread() failed.
+	 */
+	kfree(to_kthread(k));
 }
 
+#define __to_kthread(vfork)	\
+	container_of(vfork, struct kthread, exited)
+
+/*
+ * TODO: kill it and use to_kthread(). But we still need the users
+ * like kthread_stop() which has to sync with the exiting kthread.
+ */
 static struct kthread *to_live_kthread(struct task_struct *k)
 {
 	struct completion *vfork = ACCESS_ONCE(k->vfork_done);
@@ -181,14 +205,11 @@ static int kthread(void *_create)
 	int (*threadfn)(void *data) = create->threadfn;
 	void *data = create->data;
 	struct completion *done;
-	struct kthread self;
+	struct kthread *self;
 	int ret;
 
-	self.flags = 0;
-	self.data = data;
-	init_completion(&self.exited);
-	init_completion(&self.parked);
-	current->vfork_done = &self.exited;
+	self = kmalloc(sizeof(*self), GFP_KERNEL);
+	set_kthread_struct(self);
 
 	/* If user was SIGKILLed, I release the structure. */
 	done = xchg(&create->done, NULL);
@@ -196,6 +217,19 @@ static int kthread(void *_create)
 		kfree(create);
 		do_exit(-EINTR);
 	}
+
+	if (!self) {
+		create->result = ERR_PTR(-ENOMEM);
+		complete(done);
+		do_exit(-ENOMEM);
+	}
+
+	self->flags = 0;
+	self->data = data;
+	init_completion(&self->exited);
+	init_completion(&self->parked);
+	current->vfork_done = &self->exited;
+
 	/* OK, tell user we're spawned, wait for stop or wakeup */
 	__set_current_state(TASK_UNINTERRUPTIBLE);
 	create->result = current;
@@ -203,12 +237,10 @@ static int kthread(void *_create)
 	schedule();
 
 	ret = -EINTR;
-
-	if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
-		__kthread_parkme(&self);
+	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
+		__kthread_parkme(self);
 		ret = threadfn(data);
 	}
-	/* we can't just return, we must preserve "self" on stack */
 	do_exit(ret);
 }
 
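One caller-visible consequence of the new -ENOMEM path: if the kmalloc()
of struct kthread fails, the new thread writes ERR_PTR(-ENOMEM) to
create->result and exits, so creators see the failure through the usual
IS_ERR() check on the task pointer. A hedged caller-side sketch
(my_worker_fn, start_worker and the "my-worker" name are illustrative,
not part of this patch):

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static int my_worker_fn(void *data)	/* illustrative worker body */
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return 0;
}

static int start_worker(void)
{
	struct task_struct *tsk = kthread_run(my_worker_fn, NULL, "my-worker");

	/*
	 * After this patch, IS_ERR() also covers the struct kthread
	 * allocation failing: the thread reports ERR_PTR(-ENOMEM)
	 * before my_worker_fn() ever runs.
	 */
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	kthread_stop(tsk);	/* syncs with the exiting kthread */
	return 0;
}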