aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorAl Viro <viro@zeniv.linux.org.uk>2012-04-30 18:24:46 -0400
committerAl Viro <viro@zeniv.linux.org.uk>2012-06-01 13:01:51 -0400
commit44fbbb3dc687c9709a6f2236197316e5c79ab1eb (patch)
treed3c995b1cb7e3f9ac5af09b8d78f6c839a65f35a /arch
parent29bf5dd895219e5111099908040aecfc1509f9bb (diff)
x86: get rid of calling do_notify_resume() when returning to kernel mode
If we end up calling do_notify_resume() with !user_mode(regs), it does nothing (do_signal() explicitly bails out and we can't get there with TIF_NOTIFY_RESUME in such situations). Then we jump to resume_userspace_sig, which rechecks the same thing and bails out to resume_kernel, thus breaking the loop. It's easier and cheaper to check *before* calling do_notify_resume() and bail out to resume_kernel immediately. And kill the check in do_signal()... Note that on amd64 we can't get there with !user_mode() at all - asm glue takes care of that. Acked-and-reviewed-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/kernel/entry_32.S13
-rw-r--r--arch/x86/kernel/signal.c10
2 files changed, 10 insertions, 13 deletions
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 01ccf9b7147..623f2883747 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -316,7 +316,6 @@ ret_from_exception:
 	preempt_stop(CLBR_ANY)
 ret_from_intr:
 	GET_THREAD_INFO(%ebp)
-resume_userspace_sig:
 #ifdef CONFIG_VM86
 	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
 	movb PT_CS(%esp), %al
@@ -615,9 +614,13 @@ work_notifysig:				# deal with pending signals and
 					# vm86-space
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
+	movb PT_CS(%esp), %bl
+	andb $SEGMENT_RPL_MASK, %bl
+	cmpb $USER_RPL, %bl
+	jb resume_kernel
 	xorl %edx, %edx
 	call do_notify_resume
-	jmp resume_userspace_sig
+	jmp resume_userspace

 	ALIGN
 work_notifysig_v86:
@@ -630,9 +633,13 @@ work_notifysig_v86:
 #endif
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
+	movb PT_CS(%esp), %bl
+	andb $SEGMENT_RPL_MASK, %bl
+	cmpb $USER_RPL, %bl
+	jb resume_kernel
 	xorl %edx, %edx
 	call do_notify_resume
-	jmp resume_userspace_sig
+	jmp resume_userspace
 END(work_pending)

 	# perform syscall exit tracing
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index e8a89374d35..21af737053a 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -737,16 +737,6 @@ static void do_signal(struct pt_regs *regs)
 	siginfo_t info;
 	int signr;

-	/*
-	 * We want the common case to go fast, which is why we may in certain
-	 * cases get here from kernel mode. Just return without doing anything
-	 * if so.
-	 * X86_32: vm86 regs switched out by assembly code before reaching
-	 * here, so testing against kernel CS suffices.
-	 */
-	if (!user_mode(regs))
-		return;
-
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
 		/* Whee! Actually deliver the signal. */