author		Joerg Roedel <jroedel@suse.de>	2018-07-18 05:40:47 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2018-07-19 19:11:38 -0400
commit		b92a165df17ee6e616e43107730f06bf6ecf5d8d (patch)
tree		3135590197ae03f7dad7c28141ba641349686e32
parent		8b376fae0514dc7ee04786e2327169e39d12e51b (diff)
x86/entry/32: Handle Entry from Kernel-Mode on Entry-Stack
It is possible that the kernel is entered from kernel-mode and on the
entry-stack. The most common way this happens is when an exception is
triggered while loading the user-space segment registers on the
kernel-to-userspace exit path. The segment loading needs to be done
after the entry-stack switch, because the stack-switch needs kernel
%fs for per_cpu access.

When this happens, make sure to leave the kernel with the entry-stack
again, so that the interrupted code-path runs on the right stack when
switching to the user-cr3.

Detect this condition on kernel-entry by checking CS.RPL and %esp, and
if it happens, copy over the complete content of the entry stack to
the task-stack. This needs to be done because once the exception
handler is entered, the task might be scheduled out or even migrated
to a different CPU, so that we can't rely on the entry-stack contents.
Leave a marker in the stack-frame to detect this condition on the
exit path.

On the exit path the copy is reversed: copy all of the remaining
task-stack back to the entry-stack and switch to it.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Pavel Machek <pavel@ucw.cz>
Cc: "H . Peter Anvin" <hpa@zytor.com>
Cc: linux-mm@kvack.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Waiman Long <llong@redhat.com>
Cc: "David H . Gutteridge" <dhgutteridge@sympatico.ca>
Cc: joro@8bytes.org
Link: https://lkml.kernel.org/r/1531906876-13451-11-git-send-email-joro@8bytes.org
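[Editor's note] The marker works because CS is a 16-bit selector stored in a
full 32-bit pt_regs slot, so once the unused upper bits are cleared, bit 31 is
guaranteed to be free for a software flag. Below is a minimal user-space C
sketch of that flag round-trip; the selector value is hypothetical and this is
an illustration of the idea, not the kernel implementation:

/* Hypothetical user-space model of the CS_FROM_ENTRY_STACK marker. */
#include <stdint.h>
#include <stdio.h>

#define CS_FROM_ENTRY_STACK	(1u << 31)	/* same bit as the patch's define */
#define SEGMENT_RPL_MASK	0x3u		/* low two selector bits: the RPL */

int main(void)
{
	uint32_t pt_cs = 0x0010;	/* hypothetical kernel CS selector (RPL 0) */

	/* Entry path: clear the unused upper bits of the CS dword. */
	pt_cs &= 0x0000ffffu;

	/* RPL 0 means the kernel was entered from kernel-mode. */
	if ((pt_cs & SEGMENT_RPL_MASK) == 0)
		pt_cs |= CS_FROM_ENTRY_STACK;	/* mark the frame for the exit path */

	/* Exit path: test and clear the marker before copying back. */
	if (pt_cs & CS_FROM_ENTRY_STACK) {
		pt_cs &= ~CS_FROM_ENTRY_STACK;
		puts("slow-path: copy the task-stack back to the entry-stack");
	}
	return 0;
}

The exit macro clears the bit again before the frame reaches iret, so the
hardware never sees the flag.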
-rw-r--r--	arch/x86/entry/entry_32.S	116
1 file changed, 115 insertions(+), 1 deletion(-)
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 763592596727..9d6eceba0461 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -294,6 +294,9 @@
  * copied there. So allocate the stack-frame on the task-stack and
  * switch to it before we do any copying.
  */
+
+#define CS_FROM_ENTRY_STACK	(1 << 31)
+
 .macro SWITCH_TO_KERNEL_STACK
 
 	ALTERNATIVE     "", "jmp .Lend_\@", X86_FEATURE_XENPV
@@ -316,6 +319,16 @@
 	/* Load top of task-stack into %edi */
 	movl	TSS_entry2task_stack(%edi), %edi
 
+	/*
+	 * Clear unused upper bits of the dword containing the word-sized CS
+	 * slot in pt_regs in case hardware didn't clear it for us.
+	 */
+	andl	$(0x0000ffff), PT_CS(%esp)
+
+	/* Special case - entry from kernel mode via entry stack */
+	testl	$SEGMENT_RPL_MASK, PT_CS(%esp)
+	jz	.Lentry_from_kernel_\@
+
 	/* Bytes to copy */
 	movl	$PTREGS_SIZE, %ecx
 
@@ -329,8 +342,8 @@
 	 */
 	addl	$(4 * 4), %ecx
 
-.Lcopy_pt_regs_\@:
 #endif
+.Lcopy_pt_regs_\@:
 
 	/* Allocate frame on task-stack */
 	subl	%ecx, %edi
@@ -346,6 +359,56 @@
 	cld
 	rep movsl
 
+	jmp	.Lend_\@
+
+.Lentry_from_kernel_\@:
+
+	/*
+	 * This handles the case when we enter the kernel from
+	 * kernel-mode and %esp points to the entry-stack. When this
+	 * happens we need to switch to the task-stack to run C code,
+	 * but switch back to the entry-stack again when we approach
+	 * iret and return to the interrupted code-path. This usually
+	 * happens when we hit an exception while restoring user-space
+	 * segment registers on the way back to user-space.
+	 *
+	 * When we switch to the task-stack here, we can't trust the
+	 * contents of the entry-stack anymore, as the exception handler
+	 * might be scheduled out or moved to another CPU. Therefore we
+	 * copy the complete entry-stack to the task-stack and set a
+	 * marker in the iret-frame (bit 31 of the CS dword) to detect
+	 * what we've done on the iret path.
+	 *
+	 * On the iret path we copy everything back and switch to the
+	 * entry-stack, so that the interrupted kernel code-path
+	 * continues on the same stack it was interrupted with.
+	 *
+	 * Be aware that an NMI can happen anytime in this code.
+	 *
+	 * %esi: Entry-Stack pointer (same as %esp)
+	 * %edi: Top of the task stack
+	 */
+
+	/* Calculate number of bytes on the entry stack in %ecx */
+	movl	%esi, %ecx
+
+	/* %ecx to the top of entry-stack */
+	andl	$(MASK_entry_stack), %ecx
+	addl	$(SIZEOF_entry_stack), %ecx
+
+	/* Number of bytes on the entry stack to %ecx */
+	sub	%esi, %ecx
+
+	/* Mark stackframe as coming from entry stack */
+	orl	$CS_FROM_ENTRY_STACK, PT_CS(%esp)
+
+	/*
+	 * %esi and %edi are unchanged, %ecx contains the number of
+	 * bytes to copy. The code at .Lcopy_pt_regs_\@ will allocate
+	 * the stack-frame on task-stack and copy everything over
+	 */
+	jmp .Lcopy_pt_regs_\@
+
 .Lend_\@:
 .endm
 
@@ -404,6 +467,56 @@
 .endm
 
 /*
+ * This macro handles the case when we return to kernel-mode on the iret
+ * path and have to switch back to the entry stack.
+ *
+ * See the comments below the .Lentry_from_kernel_\@ label in the
+ * SWITCH_TO_KERNEL_STACK macro for more details.
+ */
+.macro PARANOID_EXIT_TO_KERNEL_MODE
+
+	/*
+	 * Test if we entered the kernel with the entry-stack. Most
+	 * likely we did not, because this code only runs on the
+	 * return-to-kernel path.
+	 */
+	testl	$CS_FROM_ENTRY_STACK, PT_CS(%esp)
+	jz	.Lend_\@
+
+	/* Unlikely slow-path */
+
+	/* Clear marker from stack-frame */
+	andl	$(~CS_FROM_ENTRY_STACK), PT_CS(%esp)
+
+	/* Copy the remaining task-stack contents to entry-stack */
+	movl	%esp, %esi
+	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
+
+	/* Bytes on the task-stack to ecx */
+	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp1), %ecx
+	subl	%esi, %ecx
+
+	/* Allocate stack-frame on entry-stack */
+	subl	%ecx, %edi
+
+	/*
+	 * Save future stack-pointer, we must not switch until the
+	 * copy is done, otherwise the NMI handler could destroy the
+	 * contents of the task-stack we are about to copy.
+	 */
+	movl	%edi, %ebx
+
+	/* Do the copy */
+	shrl	$2, %ecx
+	cld
+	rep movsl
+
+	/* Safe to switch to entry-stack now */
+	movl	%ebx, %esp
+
+.Lend_\@:
+.endm
+
+/*
  * %eax: prev task
  * %edx: next task
  */
@@ -764,6 +877,7 @@ restore_all:
 
 restore_all_kernel:
 	TRACE_IRQS_IRET
+	PARANOID_EXIT_TO_KERNEL_MODE
 	RESTORE_REGS 4
 	jmp	.Lirq_return
 
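[Editor's note] The size arithmetic on both copy paths is easy to sanity-check
in plain C. The sketch below is an assumption-laden model, not kernel code:
SIZEOF_ENTRY_STACK, the derived mask, and all sample pointer values are
invented for illustration (in the kernel the constants come from asm-offsets,
and the exit path reads the stack tops from the per-cpu TSS via TSS_sp0 and
TSS_sp1):

/* Hypothetical model of the entry/exit stack-size calculations. */
#include <stdint.h>
#include <stdio.h>

#define SIZEOF_ENTRY_STACK	0x1000u				/* assumed size, power of two */
#define MASK_ENTRY_STACK	(~(SIZEOF_ENTRY_STACK - 1))	/* aligns down to the stack base */

int main(void)
{
	/* Entry path: %esi holds the interrupted %esp inside the entry stack. */
	uint32_t esp  = 0x8000f40;	/* hypothetical pointer; stack assumed size-aligned */
	uint32_t top  = (esp & MASK_ENTRY_STACK) + SIZEOF_ENTRY_STACK;
	uint32_t used = top - esp;	/* bytes to copy over to the task-stack */

	printf("entry-stack bytes to copy: %u\n", (unsigned)used);

	/* Exit path: bytes left on the task-stack = task-stack top - %esp. */
	uint32_t task_top = 0x9000000;	/* hypothetical TSS_sp1 (task-stack top) */
	uint32_t task_esp = 0x8ffffb0;	/* hypothetical %esp at PARANOID_EXIT_TO_KERNEL_MODE */

	printf("task-stack bytes to copy back: %u\n", (unsigned)(task_top - task_esp));
	return 0;
}

Note that the byte count is shifted right by two before rep movsl, since the
copy moves 32-bit words, and the switch to the new stack pointer happens only
after the copy completes, so an NMI arriving mid-copy still runs on a
consistent stack.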