author    Linus Torvalds <torvalds@linux-foundation.org>  2014-12-10 17:24:20 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-12-10 17:24:20 -0500
commit    3100e448e7d74489a96cb7b45d88fe6962774eaa (patch)
tree      53e46a702bd191ca43639b560d2bb1d3b0ad18c8 /arch/x86/include
parent    c9f861c77269bc9950c16c6404a9476062241671 (diff)
parent    26893107aa717cd11010f0c278d02535defa1ac9 (diff)
Merge branch 'x86-vdso-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 vdso updates from Ingo Molnar:
 "Various vDSO updates from Andy Lutomirski, mostly cleanups and
  reorganization to improve maintainability, but also some
  micro-optimizations and robustization changes"

* 'x86-vdso-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86_64/vsyscall: Restore orig_ax after vsyscall seccomp
  x86_64: Add a comment explaining the TASK_SIZE_MAX guard page
  x86_64,vsyscall: Make vsyscall emulation configurable
  x86_64, vsyscall: Rewrite comment and clean up headers in vsyscall code
  x86_64, vsyscall: Turn vsyscalls all the way off when vsyscall==none
  x86,vdso: Use LSL unconditionally for vgetcpu
  x86: vdso: Fix build with older gcc
  x86_64/vdso: Clean up vgetcpu init and merge the vdso initcalls
  x86_64/vdso: Remove jiffies from the vvar page
  x86/vdso: Make the PER_CPU segment 32 bits
  x86/vdso: Make the PER_CPU segment start out accessed
  x86/vdso: Change the PER_CPU segment to use struct desc_struct
  x86_64/vdso: Move getcpu code from vsyscall_64.c to vdso/vma.c
  x86_64/vsyscall: Move all of the gate_area code to vsyscall_64.c
Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/fixmap.h    |  2
-rw-r--r--  arch/x86/include/asm/page_64.h   |  4
-rw-r--r--  arch/x86/include/asm/processor.h |  8
-rw-r--r--  arch/x86/include/asm/vgtod.h     | 19
-rw-r--r--  arch/x86/include/asm/vsyscall.h  | 33
-rw-r--r--  arch/x86/include/asm/vvar.h      |  2
6 files changed, 37 insertions(+), 31 deletions(-)
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index bf728e49c53c..f80d70009ff8 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -69,7 +69,9 @@ enum fixed_addresses {
 #ifdef CONFIG_X86_32
        FIX_HOLE,
 #else
+#ifdef CONFIG_X86_VSYSCALL_EMULATION
        VSYSCALL_PAGE = (FIXADDR_TOP - VSYSCALL_ADDR) >> PAGE_SHIFT,
+#endif
 #ifdef CONFIG_PARAVIRT_CLOCK
        PVCLOCK_FIXMAP_BEGIN,
        PVCLOCK_FIXMAP_END = PVCLOCK_FIXMAP_BEGIN+PVCLOCK_VSYSCALL_NR_PAGES-1,
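
The arithmetic here relies on the fixmap convention that slot x maps to address FIXADDR_TOP - (x << PAGE_SHIFT) (the kernel's __fix_to_virt()), so defining VSYSCALL_PAGE this way pins the fixmap slot to exactly the legacy vsyscall address. A standalone sketch of that identity; the FIXADDR_TOP value below is an assumed stand-in for illustration, while VSYSCALL_ADDR matches uapi/asm/vsyscall.h:

/* Userspace sketch of the fixmap index arithmetic, not kernel code. */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT    12
#define VSYSCALL_ADDR (-10UL << 20)           /* 0xffffffffff600000 */
#define FIXADDR_TOP   0xffffffffff7ff000UL    /* assumed value, for illustration */

/* The kernel's __fix_to_virt(): slot x lives x pages below FIXADDR_TOP. */
#define __fix_to_virt(x) (FIXADDR_TOP - ((unsigned long)(x) << PAGE_SHIFT))

#define VSYSCALL_PAGE ((FIXADDR_TOP - VSYSCALL_ADDR) >> PAGE_SHIFT)

int main(void)
{
        /* By construction, the VSYSCALL_PAGE slot maps to VSYSCALL_ADDR. */
        assert(__fix_to_virt(VSYSCALL_PAGE) == VSYSCALL_ADDR);
        printf("VSYSCALL_PAGE index = %lu\n", (unsigned long)VSYSCALL_PAGE);
        return 0;
}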
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index f408caf73430..b3bebf9e5746 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -39,6 +39,8 @@ void copy_page(void *to, void *from);
 
 #endif /* !__ASSEMBLY__ */
 
-#define __HAVE_ARCH_GATE_AREA 1
+#ifdef CONFIG_X86_VSYSCALL_EMULATION
+# define __HAVE_ARCH_GATE_AREA 1
+#endif
 
 #endif /* _ASM_X86_PAGE_64_H */
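
Gating __HAVE_ARCH_GATE_AREA matters because generic mm code keys off that macro: when it is defined, the architecture must supply the gate-VMA hooks; when it is not (vsyscall emulation compiled out, hence no gate page), generic no-op stubs take over. Roughly the shape of the generic side, a sketch modeled on include/linux/mm.h of this era rather than a verbatim copy:

#ifdef __HAVE_ARCH_GATE_AREA
/* The arch provides these when it has a gate area. */
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
extern int in_gate_area_no_mm(unsigned long addr);
extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else
/* No gate area: generic stubs report "nothing there". */
static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
        return NULL;
}
static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
        return 0;
}
#endif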
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 25b8de0f21c0..a092a0cce0b7 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -894,7 +894,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 
 #else
 /*
- * User space process size. 47bits minus one guard page.
+ * User space process size. 47bits minus one guard page.  The guard
+ * page is necessary on Intel CPUs: if a SYSCALL instruction is at
+ * the highest possible canonical userspace address, then that
+ * syscall will enter the kernel with a non-canonical return
+ * address, and SYSRET will explode dangerously.  We avoid this
+ * particular problem by preventing anything from being mapped
+ * at the maximum canonical address.
  */
 #define TASK_SIZE_MAX  ((1UL << 47) - PAGE_SIZE)
 
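
Worked out in numbers: the last canonical byte of the 47-bit user range is 0x00007fffffffffff, so a two-byte SYSCALL ending there would record a return RIP of 0x0000800000000000, which is non-canonical, and Intel's SYSRET faults in ring 0 when handed a non-canonical RCX. Reserving the top page caps the highest possible return RIP at TASK_SIZE_MAX, which is still canonical. A userspace sketch of the arithmetic (not kernel code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE     4096UL
#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)

/* 48-bit canonical: bits 63..48 must be copies of bit 47. */
static int is_canonical(uint64_t addr)
{
        int64_t sign_extended = (int64_t)(addr << 16) >> 16;
        return (uint64_t)sign_extended == addr;
}

int main(void)
{
        uint64_t bad_rip = 1UL << 47;      /* return RIP just past the canonical top */
        uint64_t ok_rip  = TASK_SIZE_MAX;  /* highest return RIP with the guard page */

        printf("TASK_SIZE_MAX = %#" PRIx64 "\n", (uint64_t)TASK_SIZE_MAX);
        printf("%#" PRIx64 " canonical? %d\n", bad_rip, is_canonical(bad_rip));
        printf("%#" PRIx64 " canonical? %d\n", ok_rip, is_canonical(ok_rip));
        return 0;
}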
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 3c3366c2e37f..e7e9682a33e9 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -70,4 +70,23 @@ static inline void gtod_write_end(struct vsyscall_gtod_data *s)
        ++s->seq;
 }
 
+#ifdef CONFIG_X86_64
+
+#define VGETCPU_CPU_MASK 0xfff
+
+static inline unsigned int __getcpu(void)
+{
+       unsigned int p;
+
+       /*
+        * Load per CPU data from GDT.  LSL is faster than RDTSCP and
+        * works on all CPUs.
+        */
+       asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+
+       return p;
+}
+
+#endif /* CONFIG_X86_64 */
+
 #endif /* _ASM_X86_VGTOD_H */
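
What LSL returns here is the segment limit of the per-CPU GDT entry, into which the kernel packs the CPU number in the low 12 bits (hence VGETCPU_CPU_MASK) and the NUMA node in the bits above, which is how the vdso's getcpu splits the value. A userspace sketch of the decode, using a fabricated packed value in place of the real segment limit:

#include <stdio.h>

#define VGETCPU_CPU_MASK 0xfff

/* Split a packed cpu/node value the way the vdso getcpu path does. */
static void decode_cpunode(unsigned int p, unsigned *cpu, unsigned *node)
{
        *cpu  = p & VGETCPU_CPU_MASK;  /* low 12 bits: CPU number */
        *node = p >> 12;               /* remaining bits: NUMA node */
}

int main(void)
{
        unsigned int p = (1u << 12) | 5;   /* pretend: node 1, cpu 5 */
        unsigned cpu, node;

        decode_cpunode(p, &cpu, &node);
        printf("cpu=%u node=%u\n", cpu, node);
        return 0;
}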
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index 2a46ca720afc..6ba66ee79710 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -4,15 +4,7 @@
 #include <linux/seqlock.h>
 #include <uapi/asm/vsyscall.h>
 
-#define VGETCPU_RDTSCP 1
-#define VGETCPU_LSL    2
-
-/* kernel space (writeable) */
-extern int vgetcpu_mode;
-extern struct timezone sys_tz;
-
-#include <asm/vvar.h>
-
+#ifdef CONFIG_X86_VSYSCALL_EMULATION
 extern void map_vsyscall(void);
 
 /*
@@ -20,25 +12,12 @@ extern void map_vsyscall(void);
  * Returns true if handled.
  */
 extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address);
-
-#ifdef CONFIG_X86_64
-
-#define VGETCPU_CPU_MASK 0xfff
-
-static inline unsigned int __getcpu(void)
+#else
+static inline void map_vsyscall(void) {}
+static inline bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 {
-       unsigned int p;
-
-       if (VVAR(vgetcpu_mode) == VGETCPU_RDTSCP) {
-               /* Load per CPU data from RDTSCP */
-               native_read_tscp(&p);
-       } else {
-               /* Load per CPU data from GDT */
-               asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
-       }
-
-       return p;
+       return false;
 }
-#endif /* CONFIG_X86_64 */
+#endif
 
 #endif /* _ASM_X86_VSYSCALL_H */
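
The #else stubs are what keep callers ifdef-free: code such as the x86 page-fault path can call emulate_vsyscall() unconditionally, and when CONFIG_X86_VSYSCALL_EMULATION is off the static inline returning false lets the optimizer discard the whole branch. A standalone demo of the pattern; the names below are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

/* Flip this to mimic building with the feature enabled. */
/* #define CONFIG_FEATURE 1 */

#ifdef CONFIG_FEATURE
bool handle_feature_fault(unsigned long address)
{
        printf("emulating access at %#lx\n", address);
        return true;
}
#else
static inline bool handle_feature_fault(unsigned long address)
{
        (void)address;
        return false;   /* feature compiled out: never handled */
}
#endif

int main(void)
{
        /* The caller is identical either way, like the fault path
         * calling emulate_vsyscall(). */
        if (!handle_feature_fault(0xff600000UL))
                printf("fault not handled, fall through to SIGSEGV path\n");
        return 0;
}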
diff --git a/arch/x86/include/asm/vvar.h b/arch/x86/include/asm/vvar.h
index 5d2b9ad2c6d2..3f32dfc2ab73 100644
--- a/arch/x86/include/asm/vvar.h
+++ b/arch/x86/include/asm/vvar.h
@@ -44,8 +44,6 @@ extern char __vvar_page;
 
 /* DECLARE_VVAR(offset, type, name) */
 
-DECLARE_VVAR(0, volatile unsigned long, jiffies)
-DECLARE_VVAR(16, int, vgetcpu_mode)
 DECLARE_VVAR(128, struct vsyscall_gtod_data, vsyscall_gtod_data)
 
 #undef DECLARE_VVAR
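
Each DECLARE_VVAR entry fixes a variable at a known offset inside the shared vvar page, so the vdso can read kernel-maintained data without relocations; after this cleanup only vsyscall_gtod_data at offset 128 remains part of that implicit layout. A generic userspace illustration of the fixed-offset idea; the macro and struct below are a sketch, not the kernel's DECLARE_VVAR:

#include <stdio.h>
#include <string.h>

static char __vvar_page[4096];  /* stands in for the real shared page */

/* Address a typed value at a fixed offset inside the page. */
#define VVAR_AT(offset, type) (*(type *)(__vvar_page + (offset)))

struct gtod_data {              /* illustrative stand-in */
        unsigned int seq;
        long long wall_time_sec;
};

int main(void)
{
        /* "Kernel" side: publish the data at the agreed offset 128. */
        struct gtod_data g = { .seq = 2, .wall_time_sec = 1418250260 };
        memcpy(__vvar_page + 128, &g, sizeof(g));

        /* "vdso" side: read it back through the same fixed offset. */
        printf("sec=%lld\n", VVAR_AT(128, struct gtod_data).wall_time_sec);
        return 0;
}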