author		Andy Lutomirski <luto@kernel.org>	2017-11-02 03:59:16 -0400
committer	Ingo Molnar <mingo@kernel.org>		2017-11-02 06:04:48 -0400
commit		d375cf1530595e33961a8844192cddab913650e3 (patch)
tree		887429bbeeef5048ed511d07a5d8078101129bec
parent		cd493a6deb8b78eca280d05f7fa73fd69403ae29 (diff)
x86/entry/64: Remove thread_struct::sp0
On x86_64, we can easily calculate sp0 when needed instead of
storing it in thread_struct.
On x86_32, a similar cleanup would be possible, but it would require
cleaning up the vm86 code first, and that can wait for a later
cleanup series.
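
For reference, the relationship the patch relies on: on x86_64 the ring-0 stack
pointer (sp0) is always the top of the task's kernel stack, which sits exactly
one struct pt_regs above task_pt_regs(), so it can be recomputed from the stack
page instead of being cached in thread_struct. Below is a minimal stand-alone
sketch of that arithmetic (not kernel code; THREAD_SIZE,
TOP_OF_KERNEL_STACK_PADDING, struct pt_regs and the sketch_* helpers are
placeholder stand-ins for the real kernel definitions touched in the diff):

#include <stdio.h>

#define THREAD_SIZE			(4 * 4096)	/* placeholder value */
#define TOP_OF_KERNEL_STACK_PADDING	0		/* 0 on x86_64 */

struct pt_regs { unsigned long regs[21]; };		/* size stand-in only */

/* Mirrors the generic task_pt_regs(): pt_regs sits just below the
 * (padded) top of the kernel stack. */
static struct pt_regs *sketch_task_pt_regs(void *stack_page)
{
	unsigned long ptr = (unsigned long)stack_page;

	ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
	return (struct pt_regs *)ptr - 1;
}

/* Mirrors task_top_of_stack(): one pt_regs above task_pt_regs(),
 * i.e. the value update_sp0() now feeds to load_sp0() on x86_64. */
static unsigned long sketch_task_top_of_stack(void *stack_page)
{
	return (unsigned long)(sketch_task_pt_regs(stack_page) + 1);
}

int main(void)
{
	static unsigned char stack[THREAD_SIZE];	/* stand-in for task_stack_page() */

	printf("stack page:    %p\n", (void *)stack);
	printf("task_pt_regs:  %p\n", (void *)sketch_task_pt_regs(stack));
	printf("sp0 (top):     %#lx\n", sketch_task_top_of_stack(stack));
	return 0;
}

In the patch below, update_sp0() simply loads this recomputed top-of-stack via
load_sp0() on x86_64, so the cached thread_struct::sp0 field is only kept for
x86_32.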
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/719cd9c66c548c4350d98a90f050aee8b17f8919.1509609304.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/x86/include/asm/compat.h		1
-rw-r--r--	arch/x86/include/asm/processor.h	28
-rw-r--r--	arch/x86/include/asm/switch_to.h	6
-rw-r--r--	arch/x86/kernel/process_64.c		1
4 files changed, 16 insertions, 20 deletions
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 5343c19814b3..948b6d8ec46f 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -6,6 +6,7 @@
  */
 #include <linux/types.h>
 #include <linux/sched.h>
+#include <linux/sched/task_stack.h>
 #include <asm/processor.h>
 #include <asm/user32.h>
 #include <asm/unistd.h>
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index ad59cec14239..ae2ae6d80674 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -430,7 +430,9 @@ typedef struct {
 struct thread_struct {
 	/* Cached TLS descriptors: */
 	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
+#ifdef CONFIG_X86_32
 	unsigned long		sp0;
+#endif
 	unsigned long		sp;
 #ifdef CONFIG_X86_32
 	unsigned long		sysenter_cs;
@@ -797,6 +799,13 @@ static inline void spin_lock_prefetch(const void *x)
 
 #define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1))
 
+#define task_pt_regs(task) \
+({									\
+	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
+	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
+	((struct pt_regs *)__ptr) - 1;					\
+})
+
 #ifdef CONFIG_X86_32
 /*
  * User space process size: 3GB (default).
@@ -816,23 +825,6 @@ static inline void spin_lock_prefetch(const void *x)
 	.addr_limit		= KERNEL_DS,	\
 }
 
-/*
- * TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack.
- * This is necessary to guarantee that the entire "struct pt_regs"
- * is accessible even if the CPU haven't stored the SS/ESP registers
- * on the stack (interrupt gate does not save these registers
- * when switching to the same priv ring).
- * Therefore beware: accessing the ss/esp fields of the
- * "struct pt_regs" is possible, but they may contain the
- * completely wrong values.
- */
-#define task_pt_regs(task) \
-({									\
-	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
-	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
-	((struct pt_regs *)__ptr) - 1;					\
-})
-
 #define KSTK_ESP(task)		(task_pt_regs(task)->sp)
 
 #else
@@ -866,11 +858,9 @@ static inline void spin_lock_prefetch(const void *x)
 #define STACK_TOP_MAX		TASK_SIZE_MAX
 
 #define INIT_THREAD  {						\
-	.sp0			= TOP_OF_INIT_STACK,		\
 	.addr_limit		= KERNEL_DS,			\
 }
 
-#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
 extern unsigned long KSTK_ESP(struct task_struct *task);
 
 #endif /* CONFIG_X86_64 */
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 54e64d909725..010cd6e4eafc 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_SWITCH_TO_H
 #define _ASM_X86_SWITCH_TO_H
 
+#include <linux/sched/task_stack.h>
+
 struct task_struct; /* one of the stranger aspects of C forward declarations */
 
 struct task_struct *__switch_to_asm(struct task_struct *prev,
@@ -87,7 +89,11 @@ static inline void refresh_sysenter_cs(struct thread_struct *thread)
 /* This is used when switching tasks or entering/exiting vm86 mode. */
 static inline void update_sp0(struct task_struct *task)
 {
+#ifdef CONFIG_X86_32
 	load_sp0(task->thread.sp0);
+#else
+	load_sp0(task_top_of_stack(task));
+#endif
 }
 
 #endif /* _ASM_X86_SWITCH_TO_H */
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 45e380958392..eeeb34f85c25 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -274,7 +274,6 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
 	struct inactive_task_frame *frame;
 	struct task_struct *me = current;
 
-	p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
 	childregs = task_pt_regs(p);
 	fork_frame = container_of(childregs, struct fork_frame, regs);
 	frame = &fork_frame->frame;