Diffstat (limited to 'kernel/kthread.c')
 kernel/kthread.c | 52
 1 file changed, 28 insertions(+), 24 deletions(-)
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 691dc2ef9baf..9eb7fed0bbaa 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -124,12 +124,12 @@ void *kthread_data(struct task_struct *task)
 
 static void __kthread_parkme(struct kthread *self)
 {
-	__set_current_state(TASK_INTERRUPTIBLE);
+	__set_current_state(TASK_PARKED);
 	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
 		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
 			complete(&self->parked);
 		schedule();
-		__set_current_state(TASK_INTERRUPTIBLE);
+		__set_current_state(TASK_PARKED);
 	}
 	clear_bit(KTHREAD_IS_PARKED, &self->flags);
 	__set_current_state(TASK_RUNNING);
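Note on the hunk above: TASK_PARKED is a dedicated sleep state, so a thread parked in __kthread_parkme() no longer reacts to stray wake_up_process() calls, which only target TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE). The sketch below is illustrative and not part of the patch; the helper name example_wake_parked is made up, but wake_up_state() is the real interface this state is paired with in __kthread_unpark() further down.

#include <linux/sched.h>

/* Illustrative sketch: only a wake targeted at TASK_PARKED can pull the
 * thread out of __kthread_parkme(); wake_up_state() returns non-zero if
 * it actually woke the task. */
static int example_wake_parked(struct task_struct *k)
{
	return wake_up_state(k, TASK_PARKED);
}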
@@ -256,8 +256,13 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 }
 EXPORT_SYMBOL(kthread_create_on_node);
 
-static void __kthread_bind(struct task_struct *p, unsigned int cpu)
+static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
 {
+	/* Must have done schedule() in kthread() before we set_task_cpu */
+	if (!wait_task_inactive(p, state)) {
+		WARN_ON(1);
+		return;
+	}
 	/* It's safe because the task is inactive. */
 	do_set_cpus_allowed(p, cpumask_of(cpu));
 	p->flags |= PF_THREAD_BOUND;
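For context: the new state argument is handed to wait_task_inactive(), which returns non-zero only once the task has been observed off the runqueue in exactly that state; only then is rewriting its allowed-CPU mask safe. A minimal caller-side sketch under those assumptions (the helper name example_bind_inactive is hypothetical and not part of the patch):

#include <linux/sched.h>
#include <linux/cpumask.h>

/* Illustrative sketch: bind @p to @cpu only once it is known to be
 * sleeping in @state and off the runqueue, mirroring the check that
 * __kthread_bind() now performs internally. */
static bool example_bind_inactive(struct task_struct *p, unsigned int cpu,
				  long state)
{
	if (!wait_task_inactive(p, state))
		return false;		/* task not (or no longer) in @state */
	do_set_cpus_allowed(p, cpumask_of(cpu));	/* safe: task is off-cpu */
	return true;
}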
@@ -274,12 +279,7 @@ static void __kthread_bind(struct task_struct *p, unsigned int cpu)
  */
 void kthread_bind(struct task_struct *p, unsigned int cpu)
 {
-	/* Must have done schedule() in kthread() before we set_task_cpu */
-	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
-		WARN_ON(1);
-		return;
-	}
-	__kthread_bind(p, cpu);
+	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(kthread_bind);
 
@@ -324,6 +324,22 @@ static struct kthread *task_get_live_kthread(struct task_struct *k)
 	return NULL;
 }
 
+static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
+{
+	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+	/*
+	 * We clear the IS_PARKED bit here as we don't wait
+	 * until the task has left the park code. So if we'd
+	 * park before that happens we'd see the IS_PARKED bit
+	 * which might be about to be cleared.
+	 */
+	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
+		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
+			__kthread_bind(k, kthread->cpu, TASK_PARKED);
+		wake_up_state(k, TASK_PARKED);
+	}
+}
+
 /**
  * kthread_unpark - unpark a thread created by kthread_create().
  * @k: thread created by kthread_create().
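The new __kthread_unpark() above is the waker side of the park protocol: rebind a per-cpu thread while it is still inactive in TASK_PARKED, then wake it with a state-targeted wakeup. For orientation, here is a hedged sketch of the sleeper side as a per-cpu worker might use it; example_percpu_thread_fn and the work placeholder are hypothetical, while kthread_should_stop(), kthread_should_park() and kthread_parkme() are the real interfaces.

#include <linux/kthread.h>

/* Illustrative per-cpu worker: when asked to park it ends up sleeping in
 * TASK_PARKED inside __kthread_parkme(); only __kthread_unpark()'s
 * wake_up_state(k, TASK_PARKED), issued after any rebinding, resumes it. */
static int example_percpu_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park()) {
			kthread_parkme();	/* sleeps in TASK_PARKED */
			continue;
		}
		/* ... do the per-cpu work and sleep until more arrives ... */
	}
	return 0;
}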
@@ -336,20 +352,8 @@ void kthread_unpark(struct task_struct *k)
 {
 	struct kthread *kthread = task_get_live_kthread(k);
 
-	if (kthread) {
-		clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
-		/*
-		 * We clear the IS_PARKED bit here as we don't wait
-		 * until the task has left the park code. So if we'd
-		 * park before that happens we'd see the IS_PARKED bit
-		 * which might be about to be cleared.
-		 */
-		if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
-			if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
-				__kthread_bind(k, kthread->cpu);
-			wake_up_process(k);
-		}
-	}
+	if (kthread)
+		__kthread_unpark(k, kthread);
 	put_task_struct(k);
 }
 
@@ -407,7 +411,7 @@ int kthread_stop(struct task_struct *k)
 	trace_sched_kthread_stop(k);
 	if (kthread) {
 		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
-		clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+		__kthread_unpark(k, kthread);
 		wake_up_process(k);
 		wait_for_completion(&kthread->exited);
 	}
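With the kthread_stop() hunk above, the stop path goes through the same unpark sequence: a still-parked per-cpu thread is rebound and woken out of TASK_PARKED before kthread_stop() waits for it to exit. A caller-side sketch under that reading of the patch (example_teardown is a hypothetical name):

#include <linux/kthread.h>

/* Illustrative teardown: no explicit kthread_unpark() is needed before
 * stopping a thread that was parked, e.g. across a CPU-offline window. */
static void example_teardown(struct task_struct *t)
{
	kthread_stop(t);	/* unparks and rebinds if needed, then waits for exit */
}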