author		Joerg Roedel <jroedel@suse.de>	2018-07-18 05:40:43 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2018-07-19 19:11:37 -0400
commit		0d2eb73b29996684d5bbb72f85c74b47b4c359f7
tree		d4eeb7755f52f4d52c58c54098c5faca7d862511
parent		8e676ced31e9d1448d3ffc4159586a259cc67f30
x86/entry/32: Split off return-to-kernel path
Use a separate return path when returning to the kernel. This allows the
PTI cr3-switch and the switch to the entry-stack to be placed in the
return-to-user path without further checking.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Pavel Machek <pavel@ucw.cz>
Cc: "H . Peter Anvin" <hpa@zytor.com>
Cc: linux-mm@kvack.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Waiman Long <llong@redhat.com>
Cc: "David H . Gutteridge" <dhgutteridge@sympatico.ca>
Cc: joro@8bytes.org
Link: https://lkml.kernel.org/r/1531906876-13451-7-git-send-email-joro@8bytes.org
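For orientation, a simplified sketch of the two return paths as they look with
this patch applied (illustrative only, not part of the commit; the label names
are taken from entry_32.S, the comment about the cr3/entry-stack switch only
restates the intent from the changelog above and is implemented by later
patches in this series, and unrelated details such as the espfix handling are
omitted):

restore_all:			# return to user space
	TRACE_IRQS_IRET
	# per the changelog, later patches place the PTI cr3-switch and the
	# switch back to the entry-stack here -- no user-vs-kernel check is
	# needed, because this path is only taken when returning to user space
	RESTORE_REGS 4
.Lirq_return:
	INTERRUPT_RETURN

restore_all_kernel:		# return to kernel, no cr3 or stack switching
	TRACE_IRQS_IRET
	RESTORE_REGS 4
	jmp	.Lirq_return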
-rw-r--r--	arch/x86/entry/entry_32.S	11
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 43641310b6e3..7251c4f3e99e 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -65,7 +65,7 @@
 # define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
 #else
 # define preempt_stop(clobbers)
-# define resume_kernel		restore_all
+# define resume_kernel		restore_all_kernel
 #endif
 
 .macro TRACE_IRQS_IRET
@@ -399,9 +399,9 @@ ENTRY(resume_kernel)
 	DISABLE_INTERRUPTS(CLBR_ANY)
 .Lneed_resched:
 	cmpl	$0, PER_CPU_VAR(__preempt_count)
-	jnz	restore_all
+	jnz	restore_all_kernel
 	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
-	jz	restore_all
+	jz	restore_all_kernel
 	call	preempt_schedule_irq
 	jmp	.Lneed_resched
 END(resume_kernel)
@@ -606,6 +606,11 @@ restore_all:
 	 */
 	INTERRUPT_RETURN
 
+restore_all_kernel:
+	TRACE_IRQS_IRET
+	RESTORE_REGS 4
+	jmp	.Lirq_return
+
 .section .fixup, "ax"
 ENTRY(iret_exc	)
 	pushl	$0			# no error code