author     Andy Lutomirski <luto@MIT.EDU>           2011-05-23 09:31:24 -0400
committer  Thomas Gleixner <tglx@linutronix.de>     2011-05-24 08:51:28 -0400
commit     8c49d9a74bac5ea3f18480307057241b808fcc0c
tree       a9d2d8160ca37e9292e605cf3cdd85b29646c4b7  /arch/x86/vdso/vma.c
parent     d762f4383100c2a87b1a3f2d678cd3b5425655b4
x86-64: Clean up vdso/kernel shared variables
Variables that are shared between the vdso and the kernel are
currently a bit of a mess. They are each defined with their own
magic, they are accessed differently in the kernel, the vsyscall page,
and the vdso, and one of them (vsyscall_clock) doesn't even really
exist.
This changes them all to use a common mechanism. All of them are
declared in vvar.h with a fixed address (validated by the linker
script). In the kernel (as before), they look like ordinary
read-write variables. In the vsyscall page and the vdso, they are
accessed through a new macro VVAR, which gives read-only access.
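[Editorial illustration, not the literal contents of the new vvar.h: a minimal sketch of the declaration/access split described above. VVAR_ADDRESS and the __KERNEL_SIDE guard are assumed names used only for this sketch.]

#define VVAR_ADDRESS	(-10UL*1024*1024 - 4096)	/* assumed fixed base of the vvar page */

#ifdef __KERNEL_SIDE	/* hypothetical guard: building the kernel proper */
#define DECLARE_VVAR(offset, type, name)	extern type name;
#define DEFINE_VVAR(type, name)						\
	type name __attribute__((section(".vvar_" #name), aligned(16)))
#define VVAR(name)	(name)			/* ordinary read-write variable */
#else			/* building the vsyscall page or the vdso */
#define DECLARE_VVAR(offset, type, name)				\
	static type const * const vvaraddr_ ## name =			\
		(void *)(VVAR_ADDRESS + (offset));
#define VVAR(name)	(*vvaraddr_ ## name)	/* read-only, fixed address, no fixups */
#endif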
The vdso is now loaded verbatim into memory without any fixups. As a
side bonus, access from the vdso is faster because a level of
indirection is removed.
While we're at it, pack jiffies and vgetcpu_mode into the same
cacheline.
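[Editorial illustration of the shared declaration list under that scheme; the offsets shown are assumptions for the sketch. Placing jiffies at offset 0 and vgetcpu_mode at offset 8 keeps both within the first 64-byte cacheline of the vvar page.]

/* DECLARE_VVAR(offset, type, name) -- offsets checked by the linker script */
DECLARE_VVAR(0, volatile unsigned long, jiffies)	/* bytes 0..7 */
DECLARE_VVAR(8, int, vgetcpu_mode)			/* bytes 8..11: same cacheline as jiffies */
DECLARE_VVAR(128, struct vsyscall_gtod_data, vsyscall_gtod_data)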
Signed-off-by: Andy Lutomirski <luto@mit.edu>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Borislav Petkov <bp@amd64.org>
Link: http://lkml.kernel.org/r/%3C7357882fbb51fa30491636a7b6528747301b7ee9.1306156808.git.luto%40mit.edu%3E
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/vdso/vma.c')
-rw-r--r--   arch/x86/vdso/vma.c   27
1 file changed, 0 insertions, 27 deletions
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 4b5d26f108bb..7abd2be0f9b9 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -15,9 +15,6 @@
 #include <asm/proto.h>
 #include <asm/vdso.h>
 
-#include "vextern.h"		/* Just for VMAGIC.  */
-#undef VEXTERN
-
 unsigned int __read_mostly vdso_enabled = 1;
 
 extern char vdso_start[], vdso_end[];
@@ -26,20 +23,10 @@ extern unsigned short vdso_sync_cpuid;
 static struct page **vdso_pages;
 static unsigned vdso_size;
 
-static inline void *var_ref(void *p, char *name)
-{
-	if (*(void **)p != (void *)VMAGIC) {
-		printk("VDSO: variable %s broken\n", name);
-		vdso_enabled = 0;
-	}
-	return p;
-}
-
 static int __init init_vdso_vars(void)
 {
 	int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
 	int i;
-	char *vbase;
 
 	vdso_size = npages << PAGE_SHIFT;
 	vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
@@ -54,20 +41,6 @@ static int __init init_vdso_vars(void)
 		copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
 	}
 
-	vbase = vmap(vdso_pages, npages, 0, PAGE_KERNEL);
-	if (!vbase)
-		goto oom;
-
-	if (memcmp(vbase, "\177ELF", 4)) {
-		printk("VDSO: I'm broken; not ELF\n");
-		vdso_enabled = 0;
-	}
-
-#define VEXTERN(x) \
-	*(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
-#include "vextern.h"
-#undef VEXTERN
-	vunmap(vbase);
 	return 0;
 
 oom: