about summary refs log tree commit diff stats
path: root/arch/x86/kernel
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2012-10-12 21:05:52 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-10-12 21:05:52 -0400
commit4e21fc138bfd7fe625ff5dc81541399aaf9d429b (patch)
tree43bedf14d2eee7711b8241dcfd6bd7b8737d9bd5 /arch/x86/kernel
parent8418263e3547ed3816475e4c55a77004f0426ee6 (diff)
parent5522be6a4624a5f505555569e4d9cee946630686 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/signal
Pull third pile of kernel_execve() patches from Al Viro:
 "The last bits of infrastructure for kernel_thread() et.al., with
  alpha/arm/x86 use of those.  Plus sanitizing the asm glue and
  do_notify_resume() on alpha, fixing the "disabled irq while running
  task_work stuff" breakage there.

  At that point the rest of kernel_thread/kernel_execve/sys_execve work
  can be done independently for different architectures.  The only
  pending bits that do depend on having all architectures converted are
  restricted to fs/* and kernel/* - that'll obviously have to wait for
  the next cycle.

  I thought we'd have to wait for all of them done before we start
  eliminating the longjump-style insanity in kernel_execve(), but it
  turned out there's a very simple way to do that without flagday-style
  changes."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/signal:
  alpha: switch to saner kernel_execve() semantics
  arm: switch to saner kernel_execve() semantics
  x86, um: convert to saner kernel_execve() semantics
  infrastructure for saner ret_from_kernel_thread semantics
  make sure that kernel_thread() callbacks call do_exit() themselves
  make sure that we always have a return path from kernel_execve()
  ppc: eeh_event should just use kthread_run()
  don't bother with kernel_thread/kernel_execve for launching linuxrc
  alpha: get rid of switch_stack argument of do_work_pending()
  alpha: don't bother passing switch_stack separately from regs
  alpha: take SIGPENDING/NOTIFY_RESUME loop into signal.c
  alpha: simplify TIF_NEED_RESCHED handling
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--arch/x86/kernel/entry_32.S31
-rw-r--r--arch/x86/kernel/entry_64.S24
2 files changed, 16 insertions, 39 deletions
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 2c6340796fe9..a1193aef6d7d 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -299,12 +299,20 @@ ENTRY(ret_from_fork)
299 CFI_ENDPROC 299 CFI_ENDPROC
300END(ret_from_fork) 300END(ret_from_fork)
301 301
302ENTRY(ret_from_kernel_execve) 302ENTRY(ret_from_kernel_thread)
303 movl %eax, %esp 303 CFI_STARTPROC
304 movl $0,PT_EAX(%esp) 304 pushl_cfi %eax
305 call schedule_tail
305 GET_THREAD_INFO(%ebp) 306 GET_THREAD_INFO(%ebp)
307 popl_cfi %eax
308 pushl_cfi $0x0202 # Reset kernel eflags
309 popfl_cfi
310 movl PT_EBP(%esp),%eax
311 call *PT_EBX(%esp)
312 movl $0,PT_EAX(%esp)
306 jmp syscall_exit 313 jmp syscall_exit
307END(ret_from_kernel_execve) 314 CFI_ENDPROC
315ENDPROC(ret_from_kernel_thread)
308 316
309/* 317/*
310 * Interrupt exit functions should be protected against kprobes 318 * Interrupt exit functions should be protected against kprobes
@@ -1015,21 +1023,6 @@ END(spurious_interrupt_bug)
1015 */ 1023 */
1016 .popsection 1024 .popsection
1017 1025
1018ENTRY(ret_from_kernel_thread)
1019 CFI_STARTPROC
1020 pushl_cfi %eax
1021 call schedule_tail
1022 GET_THREAD_INFO(%ebp)
1023 popl_cfi %eax
1024 pushl_cfi $0x0202 # Reset kernel eflags
1025 popfl_cfi
1026 movl PT_EBP(%esp),%eax
1027 call *PT_EBX(%esp)
1028 call do_exit
1029 ud2 # padding for call trace
1030 CFI_ENDPROC
1031ENDPROC(ret_from_kernel_thread)
1032
1033#ifdef CONFIG_XEN 1026#ifdef CONFIG_XEN
1034/* Xen doesn't set %esp to be precisely what the normal sysenter 1027/* Xen doesn't set %esp to be precisely what the normal sysenter
1035 entrypoint expects, so fix it up before using the normal path. */ 1028 entrypoint expects, so fix it up before using the normal path. */
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index cdc790c78f32..0c58952d64e8 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -563,15 +563,13 @@ ENTRY(ret_from_fork)
563 jmp ret_from_sys_call # go to the SYSRET fastpath 563 jmp ret_from_sys_call # go to the SYSRET fastpath
564 564
5651: 5651:
566 subq $REST_SKIP, %rsp # move the stack pointer back 566 subq $REST_SKIP, %rsp # leave space for volatiles
567 CFI_ADJUST_CFA_OFFSET REST_SKIP 567 CFI_ADJUST_CFA_OFFSET REST_SKIP
568 movq %rbp, %rdi 568 movq %rbp, %rdi
569 call *%rbx 569 call *%rbx
570 # exit 570 movl $0, RAX(%rsp)
571 mov %eax, %edi 571 RESTORE_REST
572 call do_exit 572 jmp int_ret_from_sys_call
573 ud2 # padding for call trace
574
575 CFI_ENDPROC 573 CFI_ENDPROC
576END(ret_from_fork) 574END(ret_from_fork)
577 575
@@ -1326,20 +1324,6 @@ bad_gs:
1326 jmp 2b 1324 jmp 2b
1327 .previous 1325 .previous
1328 1326
1329ENTRY(ret_from_kernel_execve)
1330 movq %rdi, %rsp
1331 movl $0, RAX(%rsp)
1332 // RESTORE_REST
1333 movq 0*8(%rsp), %r15
1334 movq 1*8(%rsp), %r14
1335 movq 2*8(%rsp), %r13
1336 movq 3*8(%rsp), %r12
1337 movq 4*8(%rsp), %rbp
1338 movq 5*8(%rsp), %rbx
1339 addq $(6*8), %rsp
1340 jmp int_ret_from_sys_call
1341END(ret_from_kernel_execve)
1342
1343/* Call softirq on interrupt stack. Interrupts are off. */ 1327/* Call softirq on interrupt stack. Interrupts are off. */
1344ENTRY(call_softirq) 1328ENTRY(call_softirq)
1345 CFI_STARTPROC 1329 CFI_STARTPROC