diff options
author | Andy Lutomirski <luto@kernel.org> | 2015-08-17 15:22:50 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2015-08-18 03:39:26 -0400 |
commit | 512255a2ad2c832ca7d4de9f31245f73781922d0 (patch) | |
tree | 494fcca893dc7a02c04353f11a21a0672f527fb6 | |
parent | 2c6625cd545bdd66acff14f3394865d43920a5c7 (diff) |
Revert "sched/x86_64: Don't save flags on context switch"
This reverts commit:
2c7577a75837 ("sched/x86_64: Don't save flags on context switch")
It was a nice speedup. It's also not quite correct: SYSENTER
enables interrupts too early.
We can re-add this optimization once the SYSENTER code is beaten
into shape, which should happen in 4.3 or 4.4.
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: stable@vger.kernel.org # v3.19
Link: http://lkml.kernel.org/r/85f56651f59f76624e80785a8fd3bdfdd089a818.1439838962.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- | arch/x86/include/asm/switch_to.h | 12 |
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 751bf4b7bf11..d7f3b3b78ac3 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -79,12 +79,12 @@ do { \
 #else /* CONFIG_X86_32 */
 
 /* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT    "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
+#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
 
 #define __EXTRA_CLOBBER \
 	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
-	  "r12", "r13", "r14", "r15", "flags"
+	  "r12", "r13", "r14", "r15"
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 #define __switch_canary \
@@ -100,11 +100,7 @@ do { \
 #define __switch_canary_iparam
 #endif	/* CC_STACKPROTECTOR */
 
-/*
- * There is no need to save or restore flags, because flags are always
- * clean in kernel mode, with the possible exception of IOPL.  Kernel IOPL
- * has no effect.
- */
+/* Save restore flags to clear handle leaking NT */
 #define switch_to(prev, next, last) \
 	asm volatile(SAVE_CONTEXT \
 	    "movq %%rsp,%P[threadrsp](%[prev])\n\t"   /* save RSP */ \