diff options
author | Andy Lutomirski <luto@kernel.org> | 2015-07-31 17:41:09 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2015-08-05 04:54:35 -0400 |
commit | 5d73fc70996d9de0d1b2fc87e62dc51153204eba (patch) | |
tree | ffafd0c3dc587df0544c8165f6df4da4b1e37fb8 | |
parent | c5f69fde26d1581ee495f68bb9de4049c8168a04 (diff) |
x86/entry/32: Migrate to C exit path
This removes the hybrid asm-and-C implementation of exit work.
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eric Paris <eparis@parisplace.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/2baa438619ea6c027b40ec9fceacca52f09c74d0.1438378274.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- | arch/x86/entry/entry_32.S | 62 |
1 file changed, 11 insertions(+), 51 deletions(-)
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index a3c307ad5ac4..b2909bf8cf70 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S | |||
@@ -256,14 +256,10 @@ ret_from_intr: | |||
256 | 256 | ||
257 | ENTRY(resume_userspace) | 257 | ENTRY(resume_userspace) |
258 | LOCKDEP_SYS_EXIT | 258 | LOCKDEP_SYS_EXIT |
259 | DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt | 259 | DISABLE_INTERRUPTS(CLBR_ANY) |
260 | # setting need_resched or sigpending | ||
261 | # between sampling and the iret | ||
262 | TRACE_IRQS_OFF | 260 | TRACE_IRQS_OFF |
263 | movl TI_flags(%ebp), %ecx | 261 | movl %esp, %eax |
264 | andl $_TIF_WORK_MASK, %ecx # is there any work to be done on | 262 | call prepare_exit_to_usermode |
265 | # int/exception return? | ||
266 | jne work_pending | ||
267 | jmp restore_all | 263 | jmp restore_all |
268 | END(ret_from_exception) | 264 | END(ret_from_exception) |
269 | 265 | ||
@@ -341,7 +337,7 @@ sysenter_after_call: | |||
341 | TRACE_IRQS_OFF | 337 | TRACE_IRQS_OFF |
342 | movl TI_flags(%ebp), %ecx | 338 | movl TI_flags(%ebp), %ecx |
343 | testl $_TIF_ALLWORK_MASK, %ecx | 339 | testl $_TIF_ALLWORK_MASK, %ecx |
344 | jnz syscall_exit_work | 340 | jnz syscall_exit_work_irqs_off |
345 | sysenter_exit: | 341 | sysenter_exit: |
346 | /* if something modifies registers it must also disable sysexit */ | 342 | /* if something modifies registers it must also disable sysexit */ |
347 | movl PT_EIP(%esp), %edx | 343 | movl PT_EIP(%esp), %edx |
@@ -377,13 +373,7 @@ syscall_after_call: | |||
377 | movl %eax, PT_EAX(%esp) # store the return value | 373 | movl %eax, PT_EAX(%esp) # store the return value |
378 | syscall_exit: | 374 | syscall_exit: |
379 | LOCKDEP_SYS_EXIT | 375 | LOCKDEP_SYS_EXIT |
380 | DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt | 376 | jmp syscall_exit_work |
381 | # setting need_resched or sigpending | ||
382 | # between sampling and the iret | ||
383 | TRACE_IRQS_OFF | ||
384 | movl TI_flags(%ebp), %ecx | ||
385 | testl $_TIF_ALLWORK_MASK, %ecx # current->work | ||
386 | jnz syscall_exit_work | ||
387 | 377 | ||
388 | restore_all: | 378 | restore_all: |
389 | TRACE_IRQS_IRET | 379 | TRACE_IRQS_IRET |
@@ -460,35 +450,6 @@ ldt_ss: | |||
460 | #endif | 450 | #endif |
461 | ENDPROC(entry_INT80_32) | 451 | ENDPROC(entry_INT80_32) |
462 | 452 | ||
463 | # perform work that needs to be done immediately before resumption | ||
464 | ALIGN | ||
465 | work_pending: | ||
466 | testb $_TIF_NEED_RESCHED, %cl | ||
467 | jz work_notifysig | ||
468 | work_resched: | ||
469 | call schedule | ||
470 | LOCKDEP_SYS_EXIT | ||
471 | DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt | ||
472 | # setting need_resched or sigpending | ||
473 | # between sampling and the iret | ||
474 | TRACE_IRQS_OFF | ||
475 | movl TI_flags(%ebp), %ecx | ||
476 | andl $_TIF_WORK_MASK, %ecx # is there any work to be done other | ||
477 | # than syscall tracing? | ||
478 | jz restore_all | ||
479 | testb $_TIF_NEED_RESCHED, %cl | ||
480 | jnz work_resched | ||
481 | |||
482 | work_notifysig: # deal with pending signals and | ||
483 | # notify-resume requests | ||
484 | TRACE_IRQS_ON | ||
485 | ENABLE_INTERRUPTS(CLBR_NONE) | ||
486 | movl %esp, %eax | ||
487 | xorl %edx, %edx | ||
488 | call do_notify_resume | ||
489 | jmp resume_userspace | ||
490 | END(work_pending) | ||
491 | |||
492 | # perform syscall exit tracing | 453 | # perform syscall exit tracing |
493 | ALIGN | 454 | ALIGN |
494 | syscall_trace_entry: | 455 | syscall_trace_entry: |
@@ -503,15 +464,14 @@ END(syscall_trace_entry) | |||
503 | 464 | ||
504 | # perform syscall exit tracing | 465 | # perform syscall exit tracing |
505 | ALIGN | 466 | ALIGN |
506 | syscall_exit_work: | 467 | syscall_exit_work_irqs_off: |
507 | testl $_TIF_WORK_SYSCALL_EXIT, %ecx | ||
508 | jz work_pending | ||
509 | TRACE_IRQS_ON | 468 | TRACE_IRQS_ON |
510 | ENABLE_INTERRUPTS(CLBR_ANY) # could let syscall_trace_leave() call | 469 | ENABLE_INTERRUPTS(CLBR_ANY) |
511 | # schedule() instead | 470 | |
471 | syscall_exit_work: | ||
512 | movl %esp, %eax | 472 | movl %esp, %eax |
513 | call syscall_trace_leave | 473 | call syscall_return_slowpath |
514 | jmp resume_userspace | 474 | jmp restore_all |
515 | END(syscall_exit_work) | 475 | END(syscall_exit_work) |
516 | 476 | ||
517 | syscall_fault: | 477 | syscall_fault: |