aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel/process_32.c
diff options
context:
space:
mode:
authorAndy Lutomirski <luto@amacapital.net>2015-03-06 20:50:18 -0500
committerIngo Molnar <mingo@kernel.org>2015-03-07 03:34:03 -0500
commitb27559a433bb6080d95c2593d4a2b81401197911 (patch)
treee4d5797f9d99b3a23e34de8c7a9900e78ba244d8 /arch/x86/kernel/process_32.c
parent9b47668843d800ed57f6f6bfd6f5c4cffdf201c6 (diff)
x86/asm/entry: Delay loading sp0 slightly on task switch
The change: 75182b1632a8 ("x86/asm/entry: Switch all C consumers of kernel_stack to this_cpu_sp0()") had the unintended side effect of changing the return value of current_thread_info() during part of the context switch process. Change it back. This has no effect as far as I can tell -- it's just for consistency. Signed-off-by: Andy Lutomirski <luto@amacapital.net> Cc: Borislav Petkov <bp@alien8.de> Cc: Denys Vlasenko <dvlasenk@redhat.com> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Oleg Nesterov <oleg@redhat.com> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/9fcaa47dd8487db59eed7a3911b6ae409476763e.1425692936.git.luto@amacapital.net Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kernel/process_32.c')
-rw-r--r--arch/x86/kernel/process_32.c10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index d3460af3d27a..0405cab6634d 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -256,11 +256,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
 
 	/*
-	 * Reload esp0.
-	 */
-	load_sp0(tss, next);
-
-	/*
 	 * Save away %gs. No need to save %fs, as it was saved on the
 	 * stack on entry. No need to save %es and %ds, as those are
 	 * always kernel segments while inside the kernel. Doing this
@@ -310,6 +305,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 */
 	arch_end_context_switch(next_p);
 
+	/*
+	 * Reload esp0. This changes current_thread_info().
+	 */
+	load_sp0(tss, next);
+
 	this_cpu_write(kernel_stack,
 		       (unsigned long)task_stack_page(next_p) +
 		       THREAD_SIZE - KERNEL_STACK_OFFSET);