author		Linus Torvalds <torvalds@linux-foundation.org>	2012-10-12 21:05:52 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-12 21:05:52 -0400
commit		4e21fc138bfd7fe625ff5dc81541399aaf9d429b (patch)
tree		43bedf14d2eee7711b8241dcfd6bd7b8737d9bd5 /arch/x86
parent		8418263e3547ed3816475e4c55a77004f0426ee6 (diff)
parent		5522be6a4624a5f505555569e4d9cee946630686 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/signal
Pull third pile of kernel_execve() patches from Al Viro:
 "The last bits of infrastructure for kernel_thread() et al., with
  alpha/arm/x86 use of those.  Plus sanitizing the asm glue and
  do_notify_resume() on alpha, fixing the "disabled irq while running
  task_work stuff" breakage there.

  At that point the rest of the kernel_thread/kernel_execve/sys_execve
  work can be done independently for different architectures.  The only
  pending bits that do depend on having all architectures converted are
  restricted to fs/* and kernel/* - that'll obviously have to wait for
  the next cycle.

  I thought we'd have to wait for all of them to be done before we could
  start eliminating the longjump-style insanity in kernel_execve(), but
  it turned out there's a very simple way to do that without
  flagday-style changes."
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/signal:
alpha: switch to saner kernel_execve() semantics
arm: switch to saner kernel_execve() semantics
x86, um: convert to saner kernel_execve() semantics
infrastructure for saner ret_from_kernel_thread semantics
make sure that kernel_thread() callbacks call do_exit() themselves
make sure that we always have a return path from kernel_execve()
ppc: eeh_event should just use kthread_run()
don't bother with kernel_thread/kernel_execve for launching linuxrc
alpha: get rid of switch_stack argument of do_work_pending()
alpha: don't bother passing switch_stack separately from regs
alpha: take SIGPENDING/NOTIFY_RESUME loop into signal.c
alpha: simplify TIF_NEED_RESCHED handling
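The "ppc: eeh_event should just use kthread_run()" item above is
representative of the cleanup direction: code that merely wants a worker
thread should not hand-roll kernel_thread() at all. A hedged sketch of that
pattern follows; the names are illustrative, not the actual eeh code.

	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/err.h>

	static int event_thread(void *data)
	{
		/* sleep until woken, stop when asked */
		while (!kthread_should_stop()) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}
		return 0;
	}

	static int start_event_thread(void)
	{
		struct task_struct *t = kthread_run(event_thread, NULL, "eehd");

		return IS_ERR(t) ? PTR_ERR(t) : 0;
	}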
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig               |  1
-rw-r--r--  arch/x86/include/asm/unistd.h  |  1
-rw-r--r--  arch/x86/kernel/entry_32.S     | 31
-rw-r--r--  arch/x86/kernel/entry_64.S     | 24
-rw-r--r--  arch/x86/um/Kconfig            |  1
5 files changed, 18 insertions(+), 40 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 42d2c35a5bb..70071b19eb9 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -109,6 +109,7 @@ config X86
 	select HAVE_RCU_USER_QS if X86_64
 	select HAVE_IRQ_TIME_ACCOUNTING
 	select GENERIC_KERNEL_THREAD
+	select GENERIC_KERNEL_EXECVE
 
 config INSTRUCTION_DECODER
 	def_bool y
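Selecting GENERIC_KERNEL_EXECVE switches x86 to the common implementation
in fs/exec.c, which is what makes the arch-specific ret_from_kernel_execve
glue removed further down unnecessary. In rough outline it looks like the
hedged sketch below (not a verbatim copy of the generic helper): exec on
this thread's own pt_regs and, on success, just return.

	int kernel_execve(const char *filename,
			  const char *const argv[],
			  const char *const envp[])
	{
		int ret = do_execve(filename,
				    (const char __user *const __user *)argv,
				    (const char __user *const __user *)envp,
				    current_pt_regs());
		if (ret < 0)
			return ret;

		/* Success: the caller (a kernel-thread callback) returns in
		   turn, and ret_from_kernel_thread carries the freshly
		   filled pt_regs out to user mode. */
		return 0;
	}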
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index 55d155560fd..16f3fc6ebf2 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -51,7 +51,6 @@
 # define __ARCH_WANT_SYS_UTIME
 # define __ARCH_WANT_SYS_WAITPID
 # define __ARCH_WANT_SYS_EXECVE
-# define __ARCH_WANT_KERNEL_EXECVE
 
 /*
  * "Conditional" syscalls
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 2c6340796fe..a1193aef6d7 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -299,12 +299,20 @@ ENTRY(ret_from_fork)
 	CFI_ENDPROC
 END(ret_from_fork)
 
-ENTRY(ret_from_kernel_execve)
-	movl %eax, %esp
-	movl $0,PT_EAX(%esp)
+ENTRY(ret_from_kernel_thread)
+	CFI_STARTPROC
+	pushl_cfi %eax
+	call schedule_tail
 	GET_THREAD_INFO(%ebp)
+	popl_cfi %eax
+	pushl_cfi $0x0202		# Reset kernel eflags
+	popfl_cfi
+	movl PT_EBP(%esp),%eax
+	call *PT_EBX(%esp)
+	movl $0,PT_EAX(%esp)
 	jmp syscall_exit
-END(ret_from_kernel_execve)
+	CFI_ENDPROC
+ENDPROC(ret_from_kernel_thread)
 
 /*
  * Interrupt exit functions should be protected against kprobes
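The new 32-bit ret_from_kernel_thread consumes a pt_regs frame that
copy_thread() prepares for kernel threads: the callback lands in PT_EBX and
its argument in PT_EBP. A simplified sketch of that producer side, assuming
the 3.7-era x86 copy_thread() layout (segment and flag setup elided):

	/* inside the 32-bit copy_thread(), kernel-thread case -- a sketch */
	if (unlikely(p->flags & PF_KTHREAD)) {
		memset(childregs, 0, sizeof(struct pt_regs));
		p->thread.ip = (unsigned long) ret_from_kernel_thread;
		childregs->bx = sp;	/* the callback function */
		childregs->bp = arg;	/* its argument */
		childregs->orig_ax = -1;
		return 0;
	}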
@@ -1015,21 +1023,6 @@ END(spurious_interrupt_bug)
  */
 .popsection
 
-ENTRY(ret_from_kernel_thread)
-	CFI_STARTPROC
-	pushl_cfi %eax
-	call schedule_tail
-	GET_THREAD_INFO(%ebp)
-	popl_cfi %eax
-	pushl_cfi $0x0202		# Reset kernel eflags
-	popfl_cfi
-	movl PT_EBP(%esp),%eax
-	call *PT_EBX(%esp)
-	call do_exit
-	ud2			# padding for call trace
-	CFI_ENDPROC
-ENDPROC(ret_from_kernel_thread)
-
 #ifdef CONFIG_XEN
 /* Xen doesn't set %esp to be precisely what the normal sysenter
    entrypoint expects, so fix it up before using the normal path. */
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index cdc790c78f3..0c58952d64e 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -563,15 +563,13 @@ ENTRY(ret_from_fork)
 	jmp ret_from_sys_call			# go to the SYSRET fastpath
 
 1:
-	subq $REST_SKIP, %rsp	# move the stack pointer back
+	subq $REST_SKIP, %rsp	# leave space for volatiles
 	CFI_ADJUST_CFA_OFFSET	REST_SKIP
 	movq %rbp, %rdi
 	call *%rbx
-	# exit
-	mov %eax, %edi
-	call do_exit
-	ud2			# padding for call trace
-
+	movl $0, RAX(%rsp)
+	RESTORE_REST
+	jmp int_ret_from_sys_call
 	CFI_ENDPROC
 END(ret_from_fork)
 
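On 64-bit the same contract holds, but the glue lives in ret_from_fork
itself: the "1:" branch loads the argument from %rbp into %rdi and calls
the function in %rbx, and a callback that returns (i.e. has done a
successful kernel_execve()) now stores 0 into pt_regs->ax and rides
int_ret_from_sys_call out to user mode. The producer of that %rbx/%rbp
pair is the generic kernel_thread(); roughly (a sketch matching the
GENERIC_KERNEL_THREAD version in kernel/fork.c of this era):

	pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
	{
		/* fn and arg travel in the stack_start/stack_size slots;
		   copy_thread() recognizes the kernel-thread case (PF_KTHREAD)
		   and stashes them in the child's bx/bp */
		return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
			       (unsigned long)fn, NULL,
			       (unsigned long)arg, NULL, NULL);
	}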
@@ -1326,20 +1324,6 @@ bad_gs:
 	jmp     2b
 	.previous
 
-ENTRY(ret_from_kernel_execve)
-	movq %rdi, %rsp
-	movl $0, RAX(%rsp)
-	// RESTORE_REST
-	movq 0*8(%rsp), %r15
-	movq 1*8(%rsp), %r14
-	movq 2*8(%rsp), %r13
-	movq 3*8(%rsp), %r12
-	movq 4*8(%rsp), %rbp
-	movq 5*8(%rsp), %rbx
-	addq $(6*8), %rsp
-	jmp int_ret_from_sys_call
-END(ret_from_kernel_execve)
-
 /* Call softirq on interrupt stack. Interrupts are off. */
 ENTRY(call_softirq)
 	CFI_STARTPROC
diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig
index 30c4eec033a..9fa950df80e 100644
--- a/arch/x86/um/Kconfig
+++ b/arch/x86/um/Kconfig
@@ -14,6 +14,7 @@ config UML_X86
 	def_bool y
 	select GENERIC_FIND_FIRST_BIT
 	select GENERIC_KERNEL_THREAD
+	select GENERIC_KERNEL_EXECVE
 
 config 64BIT
 	bool "64-bit kernel" if SUBARCH = "x86"