author		Andy Lutomirski <luto@amacapital.net>	2015-03-05 22:19:06 -0500
committer	Ingo Molnar <mingo@kernel.org>	2015-03-06 02:32:58 -0500
commit		d0a0de21f82bbc1737ea3c831f018d0c2bc6b9c2 (patch)
tree		b7cfb95be9f42d9942d25f607ccfcb05130b1018
parent		24933b82c0d9a711475a5ef7904eb733f561e637 (diff)
x86/asm/entry: Remove INIT_TSS and fold the definitions into 'cpu_tss'
The INIT_TSS is unnecessary.  Just define the initial TSS where
'cpu_tss' is defined.

While we're at it, merge the 32-bit and 64-bit definitions.  The only
syntactic change is that 32-bit kernels were computing sp0 as long,
but now they compute it as unsigned long.

Verified by objdump: the contents and relocations of
.data..percpu..shared_aligned are unchanged on 32-bit and 64-bit
kernels.

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/8fc39fa3f6c5d635e93afbdd1a0fe0678a6d7913.1425611534.git.luto@amacapital.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/x86/include/asm/processor.h	20
-rw-r--r--	arch/x86/kernel/process.c		20
2 files changed, 19 insertions(+), 21 deletions(-)
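Before the patch itself, here is a minimal standalone C sketch of the fold it performs: instead of a separate INIT_TSS macro, the per-CPU object is initialized where it is defined, using designated initializers with the 32-bit-only members behind an #ifdef. This is not kernel code; the struct, field, macro, and constant names below (fake_tss, ILLUSTRATE_32BIT, the placeholder segment values) are simplified stand-ins, and only the shape of the initializer mirrors the real cpu_tss definition.

/*
 * Minimal standalone illustration -- NOT kernel code.  Names and values
 * are stand-ins; only the initializer pattern mirrors the patch.
 */
#include <stdio.h>

#define FAKE_IO_BITMAP_LONGS	8	/* stand-in for IO_BITMAP_LONGS */

struct fake_hw_tss {
	unsigned long sp0;		/* computed as unsigned long, as in the patch */
#ifdef ILLUSTRATE_32BIT			/* stand-in for CONFIG_X86_32 */
	unsigned short ss0, ss1;
	unsigned short io_bitmap_base;
#endif
};

struct fake_tss {
	struct fake_hw_tss x86_tss;
#ifdef ILLUSTRATE_32BIT
	/* One extra long so the trailing all-ones byte stays within the limit. */
	unsigned long io_bitmap[FAKE_IO_BITMAP_LONGS + 1];
#endif
};

static unsigned long init_stack[1024];	/* stand-in for the kernel's init_stack */

/*
 * The fold: no separate INIT_TSS macro -- the object is initialized at its
 * definition, with the 32-bit-only members guarded by an #ifdef.
 * (Casting &init_stack to an integer in a static initializer and the
 * [0 ... N] range designator are GCC/Clang behaviors the kernel also uses.)
 */
static struct fake_tss cpu_tss = {
	.x86_tss = {
		.sp0 = (unsigned long)&init_stack + sizeof(init_stack),
#ifdef ILLUSTRATE_32BIT
		.ss0		= 0x18,		/* placeholder for __KERNEL_DS */
		.ss1		= 0x10,		/* placeholder for __KERNEL_CS */
		.io_bitmap_base	= 0x68,		/* placeholder for INVALID_IO_BITMAP_OFFSET */
#endif
	},
#ifdef ILLUSTRATE_32BIT
	.io_bitmap = { [0 ... FAKE_IO_BITMAP_LONGS] = ~0UL },
#endif
};

int main(void)
{
	printf("sp0 = %#lx\n", cpu_tss.x86_tss.sp0);
	return 0;
}

Building it plainly (cc sketch.c) exercises the 64-bit-style shape; adding -DILLUSTRATE_32BIT pulls in the guarded members, analogous to what CONFIG_X86_32 does in the real definition below.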
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 117ee65473e2..f5e3ec63767d 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -818,22 +818,6 @@ static inline void spin_lock_prefetch(const void *x)
 	.io_bitmap_ptr		= NULL,				  \
 }
 
-/*
- * Note that the .io_bitmap member must be extra-big. This is because
- * the CPU will access an additional byte beyond the end of the IO
- * permission bitmap. The extra byte must be all 1 bits, and must
- * be within the limit.
- */
-#define INIT_TSS  {							  \
-	.x86_tss = {							  \
-		.sp0		= sizeof(init_stack) + (long)&init_stack, \
-		.ss0		= __KERNEL_DS,				  \
-		.ss1		= __KERNEL_CS,				  \
-		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		  \
-	},								  \
-	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },	  \
-}
-
 extern unsigned long thread_saved_pc(struct task_struct *tsk);
 
 #define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
@@ -892,10 +876,6 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
 }
 
-#define INIT_TSS  { \
-	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
-}
-
 /*
  * Return saved PC of a blocked thread.
  * What is this good for? it will be always the scheduler or ret_from_fork.
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 6f6087349231..f4c0af7fc3a0 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -37,7 +37,25 @@
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = INIT_TSS;
+__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
+	.x86_tss = {
+		.sp0 = (unsigned long)&init_stack + sizeof(init_stack),
+#ifdef CONFIG_X86_32
+		.ss0 = __KERNEL_DS,
+		.ss1 = __KERNEL_CS,
+		.io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
+#endif
+	},
+#ifdef CONFIG_X86_32
+	/*
+	 * Note that the .io_bitmap member must be extra-big. This is because
+	 * the CPU will access an additional byte beyond the end of the IO
+	 * permission bitmap. The extra byte must be all 1 bits, and must
+	 * be within the limit.
+	 */
+	.io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },
+#endif
+};
 EXPORT_PER_CPU_SYMBOL_GPL(cpu_tss);
 
 #ifdef CONFIG_X86_64