diff options
Diffstat (limited to 'arch/x86/vdso')
-rw-r--r-- | arch/x86/vdso/Makefile | 21 | ||||
-rw-r--r-- | arch/x86/vdso/vclock_gettime.c | 74 | ||||
-rw-r--r-- | arch/x86/vdso/vdso.lds.S | 9 | ||||
-rw-r--r-- | arch/x86/vdso/vdso32-setup.c | 15 | ||||
-rw-r--r-- | arch/x86/vdso/vextern.h | 16 | ||||
-rw-r--r-- | arch/x86/vdso/vgetcpu.c | 3 | ||||
-rw-r--r-- | arch/x86/vdso/vma.c | 27 | ||||
-rw-r--r-- | arch/x86/vdso/vvar.c | 12 |
8 files changed, 81 insertions, 96 deletions
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile index 4a2afa1bac51..bef0bc962400 100644 --- a/arch/x86/vdso/Makefile +++ b/arch/x86/vdso/Makefile | |||
@@ -11,7 +11,7 @@ vdso-install-$(VDSO32-y) += $(vdso32-images) | |||
11 | 11 | ||
12 | 12 | ||
13 | # files to link into the vdso | 13 | # files to link into the vdso |
14 | vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o vvar.o | 14 | vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o |
15 | 15 | ||
16 | # files to link into kernel | 16 | # files to link into kernel |
17 | obj-$(VDSO64-y) += vma.o vdso.o | 17 | obj-$(VDSO64-y) += vma.o vdso.o |
@@ -25,7 +25,7 @@ targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y) | |||
25 | 25 | ||
26 | export CPPFLAGS_vdso.lds += -P -C | 26 | export CPPFLAGS_vdso.lds += -P -C |
27 | 27 | ||
28 | VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -Wl,-soname=linux-vdso.so.1 \ | 28 | VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \ |
29 | -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 | 29 | -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 |
30 | 30 | ||
31 | $(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so | 31 | $(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so |
@@ -37,11 +37,24 @@ $(obj)/%.so: OBJCOPYFLAGS := -S | |||
37 | $(obj)/%.so: $(obj)/%.so.dbg FORCE | 37 | $(obj)/%.so: $(obj)/%.so.dbg FORCE |
38 | $(call if_changed,objcopy) | 38 | $(call if_changed,objcopy) |
39 | 39 | ||
40 | # | ||
41 | # Don't omit frame pointers for ease of userspace debugging, but do | ||
42 | # optimize sibling calls. | ||
43 | # | ||
40 | CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \ | 44 | CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \ |
41 | $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) | 45 | $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \ |
46 | -fno-omit-frame-pointer -foptimize-sibling-calls | ||
42 | 47 | ||
43 | $(vobjs): KBUILD_CFLAGS += $(CFL) | 48 | $(vobjs): KBUILD_CFLAGS += $(CFL) |
44 | 49 | ||
50 | # | ||
51 | # vDSO code runs in userspace and -pg doesn't help with profiling anyway. | ||
52 | # | ||
53 | CFLAGS_REMOVE_vdso-note.o = -pg | ||
54 | CFLAGS_REMOVE_vclock_gettime.o = -pg | ||
55 | CFLAGS_REMOVE_vgetcpu.o = -pg | ||
56 | CFLAGS_REMOVE_vvar.o = -pg | ||
57 | |||
45 | targets += vdso-syms.lds | 58 | targets += vdso-syms.lds |
46 | obj-$(VDSO64-y) += vdso-syms.lds | 59 | obj-$(VDSO64-y) += vdso-syms.lds |
47 | 60 | ||
@@ -69,7 +82,7 @@ vdso32.so-$(VDSO32-y) += sysenter | |||
69 | vdso32-images = $(vdso32.so-y:%=vdso32-%.so) | 82 | vdso32-images = $(vdso32.so-y:%=vdso32-%.so) |
70 | 83 | ||
71 | CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds) | 84 | CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds) |
72 | VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -Wl,-soname=linux-gate.so.1 | 85 | VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-soname=linux-gate.so.1 |
73 | 86 | ||
74 | # This makes sure the $(obj) subdirectory exists even though vdso32/ | 87 | # This makes sure the $(obj) subdirectory exists even though vdso32/ |
75 | # is not a kbuild sub-make subdirectory. | 88 | # is not a kbuild sub-make subdirectory. |
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c index ee55754cc3c5..a724905fdae7 100644 --- a/arch/x86/vdso/vclock_gettime.c +++ b/arch/x86/vdso/vclock_gettime.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Copyright 2006 Andi Kleen, SUSE Labs. | 2 | * Copyright 2006 Andi Kleen, SUSE Labs. |
3 | * Subject to the GNU Public License, v.2 | 3 | * Subject to the GNU Public License, v.2 |
4 | * | 4 | * |
5 | * Fast user context implementation of clock_gettime and gettimeofday. | 5 | * Fast user context implementation of clock_gettime, gettimeofday, and time. |
6 | * | 6 | * |
7 | * The code should have no internal unresolved relocations. | 7 | * The code should have no internal unresolved relocations. |
8 | * Check with readelf after changing. | 8 | * Check with readelf after changing. |
@@ -22,9 +22,8 @@ | |||
22 | #include <asm/hpet.h> | 22 | #include <asm/hpet.h> |
23 | #include <asm/unistd.h> | 23 | #include <asm/unistd.h> |
24 | #include <asm/io.h> | 24 | #include <asm/io.h> |
25 | #include "vextern.h" | ||
26 | 25 | ||
27 | #define gtod vdso_vsyscall_gtod_data | 26 | #define gtod (&VVAR(vsyscall_gtod_data)) |
28 | 27 | ||
29 | notrace static long vdso_fallback_gettime(long clock, struct timespec *ts) | 28 | notrace static long vdso_fallback_gettime(long clock, struct timespec *ts) |
30 | { | 29 | { |
@@ -56,22 +55,6 @@ notrace static noinline int do_realtime(struct timespec *ts) | |||
56 | return 0; | 55 | return 0; |
57 | } | 56 | } |
58 | 57 | ||
59 | /* Copy of the version in kernel/time.c which we cannot directly access */ | ||
60 | notrace static void | ||
61 | vset_normalized_timespec(struct timespec *ts, long sec, long nsec) | ||
62 | { | ||
63 | while (nsec >= NSEC_PER_SEC) { | ||
64 | nsec -= NSEC_PER_SEC; | ||
65 | ++sec; | ||
66 | } | ||
67 | while (nsec < 0) { | ||
68 | nsec += NSEC_PER_SEC; | ||
69 | --sec; | ||
70 | } | ||
71 | ts->tv_sec = sec; | ||
72 | ts->tv_nsec = nsec; | ||
73 | } | ||
74 | |||
75 | notrace static noinline int do_monotonic(struct timespec *ts) | 58 | notrace static noinline int do_monotonic(struct timespec *ts) |
76 | { | 59 | { |
77 | unsigned long seq, ns, secs; | 60 | unsigned long seq, ns, secs; |
@@ -82,7 +65,17 @@ notrace static noinline int do_monotonic(struct timespec *ts) | |||
82 | secs += gtod->wall_to_monotonic.tv_sec; | 65 | secs += gtod->wall_to_monotonic.tv_sec; |
83 | ns += gtod->wall_to_monotonic.tv_nsec; | 66 | ns += gtod->wall_to_monotonic.tv_nsec; |
84 | } while (unlikely(read_seqretry(&gtod->lock, seq))); | 67 | } while (unlikely(read_seqretry(&gtod->lock, seq))); |
85 | vset_normalized_timespec(ts, secs, ns); | 68 | |
69 | /* wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec | ||
70 | * are all guaranteed to be nonnegative. | ||
71 | */ | ||
72 | while (ns >= NSEC_PER_SEC) { | ||
73 | ns -= NSEC_PER_SEC; | ||
74 | ++secs; | ||
75 | } | ||
76 | ts->tv_sec = secs; | ||
77 | ts->tv_nsec = ns; | ||
78 | |||
86 | return 0; | 79 | return 0; |
87 | } | 80 | } |
88 | 81 | ||
@@ -107,7 +100,17 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts) | |||
107 | secs += gtod->wall_to_monotonic.tv_sec; | 100 | secs += gtod->wall_to_monotonic.tv_sec; |
108 | ns += gtod->wall_to_monotonic.tv_nsec; | 101 | ns += gtod->wall_to_monotonic.tv_nsec; |
109 | } while (unlikely(read_seqretry(&gtod->lock, seq))); | 102 | } while (unlikely(read_seqretry(&gtod->lock, seq))); |
110 | vset_normalized_timespec(ts, secs, ns); | 103 | |
104 | /* wall_time_nsec and wall_to_monotonic.tv_nsec are | ||
105 | * guaranteed to be between 0 and NSEC_PER_SEC. | ||
106 | */ | ||
107 | if (ns >= NSEC_PER_SEC) { | ||
108 | ns -= NSEC_PER_SEC; | ||
109 | ++secs; | ||
110 | } | ||
111 | ts->tv_sec = secs; | ||
112 | ts->tv_nsec = ns; | ||
113 | |||
111 | return 0; | 114 | return 0; |
112 | } | 115 | } |
113 | 116 | ||
@@ -157,3 +160,32 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) | |||
157 | } | 160 | } |
158 | int gettimeofday(struct timeval *, struct timezone *) | 161 | int gettimeofday(struct timeval *, struct timezone *) |
159 | __attribute__((weak, alias("__vdso_gettimeofday"))); | 162 | __attribute__((weak, alias("__vdso_gettimeofday"))); |
163 | |||
164 | /* This will break when the xtime seconds get inaccurate, but that is | ||
165 | * unlikely */ | ||
166 | |||
167 | static __always_inline long time_syscall(long *t) | ||
168 | { | ||
169 | long secs; | ||
170 | asm volatile("syscall" | ||
171 | : "=a" (secs) | ||
172 | : "0" (__NR_time), "D" (t) : "cc", "r11", "cx", "memory"); | ||
173 | return secs; | ||
174 | } | ||
175 | |||
176 | notrace time_t __vdso_time(time_t *t) | ||
177 | { | ||
178 | time_t result; | ||
179 | |||
180 | if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled)) | ||
181 | return time_syscall(t); | ||
182 | |||
183 | /* This is atomic on x86_64 so we don't need any locks. */ | ||
184 | result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec); | ||
185 | |||
186 | if (t) | ||
187 | *t = result; | ||
188 | return result; | ||
189 | } | ||
190 | int time(time_t *t) | ||
191 | __attribute__((weak, alias("__vdso_time"))); | ||
diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S index 4e5dd3b4de7f..b96b2677cad8 100644 --- a/arch/x86/vdso/vdso.lds.S +++ b/arch/x86/vdso/vdso.lds.S | |||
@@ -23,15 +23,10 @@ VERSION { | |||
23 | __vdso_gettimeofday; | 23 | __vdso_gettimeofday; |
24 | getcpu; | 24 | getcpu; |
25 | __vdso_getcpu; | 25 | __vdso_getcpu; |
26 | time; | ||
27 | __vdso_time; | ||
26 | local: *; | 28 | local: *; |
27 | }; | 29 | }; |
28 | } | 30 | } |
29 | 31 | ||
30 | VDSO64_PRELINK = VDSO_PRELINK; | 32 | VDSO64_PRELINK = VDSO_PRELINK; |
31 | |||
32 | /* | ||
33 | * Define VDSO64_x for each VEXTERN(x), for use via VDSO64_SYMBOL. | ||
34 | */ | ||
35 | #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x; | ||
36 | #include "vextern.h" | ||
37 | #undef VEXTERN | ||
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c index 36df991985b2..468d591dde31 100644 --- a/arch/x86/vdso/vdso32-setup.c +++ b/arch/x86/vdso/vdso32-setup.c | |||
@@ -417,24 +417,25 @@ const char *arch_vma_name(struct vm_area_struct *vma) | |||
417 | return NULL; | 417 | return NULL; |
418 | } | 418 | } |
419 | 419 | ||
420 | struct vm_area_struct *get_gate_vma(struct task_struct *tsk) | 420 | struct vm_area_struct *get_gate_vma(struct mm_struct *mm) |
421 | { | 421 | { |
422 | struct mm_struct *mm = tsk->mm; | 422 | /* |
423 | 423 | * Check to see if the corresponding task was created in compat vdso | |
424 | /* Check to see if this task was created in compat vdso mode */ | 424 | * mode. |
425 | */ | ||
425 | if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE) | 426 | if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE) |
426 | return &gate_vma; | 427 | return &gate_vma; |
427 | return NULL; | 428 | return NULL; |
428 | } | 429 | } |
429 | 430 | ||
430 | int in_gate_area(struct task_struct *task, unsigned long addr) | 431 | int in_gate_area(struct mm_struct *mm, unsigned long addr) |
431 | { | 432 | { |
432 | const struct vm_area_struct *vma = get_gate_vma(task); | 433 | const struct vm_area_struct *vma = get_gate_vma(mm); |
433 | 434 | ||
434 | return vma && addr >= vma->vm_start && addr < vma->vm_end; | 435 | return vma && addr >= vma->vm_start && addr < vma->vm_end; |
435 | } | 436 | } |
436 | 437 | ||
437 | int in_gate_area_no_task(unsigned long addr) | 438 | int in_gate_area_no_mm(unsigned long addr) |
438 | { | 439 | { |
439 | return 0; | 440 | return 0; |
440 | } | 441 | } |
diff --git a/arch/x86/vdso/vextern.h b/arch/x86/vdso/vextern.h deleted file mode 100644 index 1683ba2ae3e8..000000000000 --- a/arch/x86/vdso/vextern.h +++ /dev/null | |||
@@ -1,16 +0,0 @@ | |||
1 | #ifndef VEXTERN | ||
2 | #include <asm/vsyscall.h> | ||
3 | #define VEXTERN(x) \ | ||
4 | extern typeof(x) *vdso_ ## x __attribute__((visibility("hidden"))); | ||
5 | #endif | ||
6 | |||
7 | #define VMAGIC 0xfeedbabeabcdefabUL | ||
8 | |||
9 | /* Any kernel variables used in the vDSO must be exported in the main | ||
10 | kernel's vmlinux.lds.S/vsyscall.h/proper __section and | ||
11 | put into vextern.h and be referenced as a pointer with vdso prefix. | ||
12 | The main kernel later fills in the values. */ | ||
13 | |||
14 | VEXTERN(jiffies) | ||
15 | VEXTERN(vgetcpu_mode) | ||
16 | VEXTERN(vsyscall_gtod_data) | ||
diff --git a/arch/x86/vdso/vgetcpu.c b/arch/x86/vdso/vgetcpu.c index 9fbc6b20026b..5463ad558573 100644 --- a/arch/x86/vdso/vgetcpu.c +++ b/arch/x86/vdso/vgetcpu.c | |||
@@ -11,14 +11,13 @@ | |||
11 | #include <linux/time.h> | 11 | #include <linux/time.h> |
12 | #include <asm/vsyscall.h> | 12 | #include <asm/vsyscall.h> |
13 | #include <asm/vgtod.h> | 13 | #include <asm/vgtod.h> |
14 | #include "vextern.h" | ||
15 | 14 | ||
16 | notrace long | 15 | notrace long |
17 | __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused) | 16 | __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused) |
18 | { | 17 | { |
19 | unsigned int p; | 18 | unsigned int p; |
20 | 19 | ||
21 | if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) { | 20 | if (VVAR(vgetcpu_mode) == VGETCPU_RDTSCP) { |
22 | /* Load per CPU data from RDTSCP */ | 21 | /* Load per CPU data from RDTSCP */ |
23 | native_read_tscp(&p); | 22 | native_read_tscp(&p); |
24 | } else { | 23 | } else { |
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c index 4b5d26f108bb..7abd2be0f9b9 100644 --- a/arch/x86/vdso/vma.c +++ b/arch/x86/vdso/vma.c | |||
@@ -15,9 +15,6 @@ | |||
15 | #include <asm/proto.h> | 15 | #include <asm/proto.h> |
16 | #include <asm/vdso.h> | 16 | #include <asm/vdso.h> |
17 | 17 | ||
18 | #include "vextern.h" /* Just for VMAGIC. */ | ||
19 | #undef VEXTERN | ||
20 | |||
21 | unsigned int __read_mostly vdso_enabled = 1; | 18 | unsigned int __read_mostly vdso_enabled = 1; |
22 | 19 | ||
23 | extern char vdso_start[], vdso_end[]; | 20 | extern char vdso_start[], vdso_end[]; |
@@ -26,20 +23,10 @@ extern unsigned short vdso_sync_cpuid; | |||
26 | static struct page **vdso_pages; | 23 | static struct page **vdso_pages; |
27 | static unsigned vdso_size; | 24 | static unsigned vdso_size; |
28 | 25 | ||
29 | static inline void *var_ref(void *p, char *name) | ||
30 | { | ||
31 | if (*(void **)p != (void *)VMAGIC) { | ||
32 | printk("VDSO: variable %s broken\n", name); | ||
33 | vdso_enabled = 0; | ||
34 | } | ||
35 | return p; | ||
36 | } | ||
37 | |||
38 | static int __init init_vdso_vars(void) | 26 | static int __init init_vdso_vars(void) |
39 | { | 27 | { |
40 | int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE; | 28 | int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE; |
41 | int i; | 29 | int i; |
42 | char *vbase; | ||
43 | 30 | ||
44 | vdso_size = npages << PAGE_SHIFT; | 31 | vdso_size = npages << PAGE_SHIFT; |
45 | vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL); | 32 | vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL); |
@@ -54,20 +41,6 @@ static int __init init_vdso_vars(void) | |||
54 | copy_page(page_address(p), vdso_start + i*PAGE_SIZE); | 41 | copy_page(page_address(p), vdso_start + i*PAGE_SIZE); |
55 | } | 42 | } |
56 | 43 | ||
57 | vbase = vmap(vdso_pages, npages, 0, PAGE_KERNEL); | ||
58 | if (!vbase) | ||
59 | goto oom; | ||
60 | |||
61 | if (memcmp(vbase, "\177ELF", 4)) { | ||
62 | printk("VDSO: I'm broken; not ELF\n"); | ||
63 | vdso_enabled = 0; | ||
64 | } | ||
65 | |||
66 | #define VEXTERN(x) \ | ||
67 | *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x; | ||
68 | #include "vextern.h" | ||
69 | #undef VEXTERN | ||
70 | vunmap(vbase); | ||
71 | return 0; | 44 | return 0; |
72 | 45 | ||
73 | oom: | 46 | oom: |
diff --git a/arch/x86/vdso/vvar.c b/arch/x86/vdso/vvar.c deleted file mode 100644 index 1b7e703684f9..000000000000 --- a/arch/x86/vdso/vvar.c +++ /dev/null | |||
@@ -1,12 +0,0 @@ | |||
1 | /* Define pointer to external vDSO variables. | ||
2 | These are part of the vDSO. The kernel fills in the real addresses | ||
3 | at boot time. This is done because when the vdso is linked the | ||
4 | kernel isn't yet and we don't know the final addresses. */ | ||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/time.h> | ||
7 | #include <asm/vsyscall.h> | ||
8 | #include <asm/timex.h> | ||
9 | #include <asm/vgtod.h> | ||
10 | |||
11 | #define VEXTERN(x) typeof (__ ## x) *const vdso_ ## x = (void *)VMAGIC; | ||
12 | #include "vextern.h" | ||