diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-10-03 19:13:28 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-10-03 19:13:28 -0400 |
commit | 1a4a2bc460721bc8f91e4c1294d39b38e5af132f (patch) | |
tree | fe646d05f6e17f05601e0a32cc796bec718ab6e7 /kernel/kthread.c | |
parent | 110a9e42b68719f584879c5c5c727bbae90d15f9 (diff) | |
parent | 1ef55be16ed69538f89e0a6508be5e62fdc9851c (diff) |
Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull low-level x86 updates from Ingo Molnar:
"In this cycle this topic tree has become one of those 'super topics'
that accumulated a lot of changes:
- Add CONFIG_VMAP_STACK=y support to the core kernel and enable it on
x86 - preceded by an array of changes. v4.8 saw preparatory changes
in this area already - this is the rest of the work. Includes the
thread stack caching performance optimization. (Andy Lutomirski)
- switch_to() cleanups and all around enhancements. (Brian Gerst)
- A large number of dumpstack infrastructure enhancements and an
unwinder abstraction. The secret long term plan is safe(r) live
patching plus maybe another attempt at debuginfo based unwinding -
but all these current bits are standalone enhancements in a frame
pointer based debug environment as well. (Josh Poimboeuf)
- More __ro_after_init and const annotations. (Kees Cook)
- Enable KASLR for the vmemmap memory region. (Thomas Garnier)"
[ The virtually mapped stack changes are pretty fundamental, and not
x86-specific per se, even if they are only used on x86 right now. ]
* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits)
x86/asm: Get rid of __read_cr4_safe()
thread_info: Use unsigned long for flags
x86/alternatives: Add stack frame dependency to alternative_call_2()
x86/dumpstack: Fix show_stack() task pointer regression
x86/dumpstack: Remove dump_trace() and related callbacks
x86/dumpstack: Convert show_trace_log_lvl() to use the new unwinder
oprofile/x86: Convert x86_backtrace() to use the new unwinder
x86/stacktrace: Convert save_stack_trace_*() to use the new unwinder
perf/x86: Convert perf_callchain_kernel() to use the new unwinder
x86/unwind: Add new unwind interface and implementations
x86/dumpstack: Remove NULL task pointer convention
fork: Optimize task creation by caching two thread stacks per CPU if CONFIG_VMAP_STACK=y
sched/core: Free the stack early if CONFIG_THREAD_INFO_IN_TASK
lib/syscall: Pin the task stack in collect_syscall()
x86/process: Pin the target stack in get_wchan()
x86/dumpstack: Pin the target stack when dumping it
kthread: Pin the stack via try_get_task_stack()/put_task_stack() in to_live_kthread() function
sched/core: Add try_get_task_stack() and put_task_stack()
x86/entry/64: Fix a minor comment rebase error
iommu/amd: Don't put completion-wait semaphore on stack
...
Diffstat (limited to 'kernel/kthread.c')
-rw-r--r-- | kernel/kthread.c | 8 |
1 file changed, 6 insertions, 2 deletions
diff --git a/kernel/kthread.c b/kernel/kthread.c index 9ff173dca1ae..4ab4c3766a80 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c | |||
@@ -64,7 +64,7 @@ static inline struct kthread *to_kthread(struct task_struct *k) | |||
64 | static struct kthread *to_live_kthread(struct task_struct *k) | 64 | static struct kthread *to_live_kthread(struct task_struct *k) |
65 | { | 65 | { |
66 | struct completion *vfork = ACCESS_ONCE(k->vfork_done); | 66 | struct completion *vfork = ACCESS_ONCE(k->vfork_done); |
67 | if (likely(vfork)) | 67 | if (likely(vfork) && try_get_task_stack(k)) |
68 | return __to_kthread(vfork); | 68 | return __to_kthread(vfork); |
69 | return NULL; | 69 | return NULL; |
70 | } | 70 | } |
@@ -425,8 +425,10 @@ void kthread_unpark(struct task_struct *k) | |||
425 | { | 425 | { |
426 | struct kthread *kthread = to_live_kthread(k); | 426 | struct kthread *kthread = to_live_kthread(k); |
427 | 427 | ||
428 | if (kthread) | 428 | if (kthread) { |
429 | __kthread_unpark(k, kthread); | 429 | __kthread_unpark(k, kthread); |
430 | put_task_stack(k); | ||
431 | } | ||
430 | } | 432 | } |
431 | EXPORT_SYMBOL_GPL(kthread_unpark); | 433 | EXPORT_SYMBOL_GPL(kthread_unpark); |
432 | 434 | ||
@@ -455,6 +457,7 @@ int kthread_park(struct task_struct *k) | |||
455 | wait_for_completion(&kthread->parked); | 457 | wait_for_completion(&kthread->parked); |
456 | } | 458 | } |
457 | } | 459 | } |
460 | put_task_stack(k); | ||
458 | ret = 0; | 461 | ret = 0; |
459 | } | 462 | } |
460 | return ret; | 463 | return ret; |
@@ -490,6 +493,7 @@ int kthread_stop(struct task_struct *k) | |||
490 | __kthread_unpark(k, kthread); | 493 | __kthread_unpark(k, kthread); |
491 | wake_up_process(k); | 494 | wake_up_process(k); |
492 | wait_for_completion(&kthread->exited); | 495 | wait_for_completion(&kthread->exited); |
496 | put_task_stack(k); | ||
493 | } | 497 | } |
494 | ret = k->exit_code; | 498 | ret = k->exit_code; |
495 | put_task_struct(k); | 499 | put_task_struct(k); |