aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/vdso/vma.c
diff options
context:
space:
mode:
authorAndy Lutomirski <luto@amacapital.net>2014-09-23 13:50:57 -0400
committerIngo Molnar <mingo@kernel.org>2014-10-28 06:22:14 -0400
commit1c0c1b93df4dad43b8050db005bb1c03bc7e09bf (patch)
tree16182a691cacd937c2d66d251f6ddab3e40de835 /arch/x86/vdso/vma.c
parent61a492fb1759f3e892ad0408e36d3575c5f890d0 (diff)
x86_64/vdso: Clean up vgetcpu init and merge the vdso initcalls
Now vdso/vma.c has a single initcall and no references to "vsyscall".

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Link: http://lkml.kernel.org/r/945c463e2804fedd8b08d63a040cbe85d55195aa.1411494540.git.luto@amacapital.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/vdso/vma.c')
-rw-r--r--arch/x86/vdso/vma.c54
1 file changed, 18 insertions(+), 36 deletions(-)
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 32ca60c8157b..a280b11e2122 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * Set up the VMAs to tell the VM about the vDSO.
3 * Copyright 2007 Andi Kleen, SUSE Labs. 2 * Copyright 2007 Andi Kleen, SUSE Labs.
4 * Subject to the GPL, v.2 3 * Subject to the GPL, v.2
4 *
5 * This contains most of the x86 vDSO kernel-side code.
5 */ 6 */
6#include <linux/mm.h> 7#include <linux/mm.h>
7#include <linux/err.h> 8#include <linux/err.h>
@@ -11,18 +12,16 @@
11#include <linux/random.h> 12#include <linux/random.h>
12#include <linux/elf.h> 13#include <linux/elf.h>
13#include <linux/cpu.h> 14#include <linux/cpu.h>
14#include <asm/vsyscall.h>
15#include <asm/vgtod.h> 15#include <asm/vgtod.h>
16#include <asm/proto.h> 16#include <asm/proto.h>
17#include <asm/vdso.h> 17#include <asm/vdso.h>
18#include <asm/vvar.h>
18#include <asm/page.h> 19#include <asm/page.h>
19#include <asm/hpet.h> 20#include <asm/hpet.h>
20#include <asm/desc.h> 21#include <asm/desc.h>
21 22
22#if defined(CONFIG_X86_64) 23#if defined(CONFIG_X86_64)
23unsigned int __read_mostly vdso64_enabled = 1; 24unsigned int __read_mostly vdso64_enabled = 1;
24
25extern unsigned short vdso_sync_cpuid;
26#endif 25#endif
27 26
28void __init init_vdso_image(const struct vdso_image *image) 27void __init init_vdso_image(const struct vdso_image *image)
@@ -40,20 +39,6 @@ void __init init_vdso_image(const struct vdso_image *image)
40 image->alt_len)); 39 image->alt_len));
41} 40}
42 41
43#if defined(CONFIG_X86_64)
44static int __init init_vdso(void)
45{
46 init_vdso_image(&vdso_image_64);
47
48#ifdef CONFIG_X86_X32_ABI
49 init_vdso_image(&vdso_image_x32);
50#endif
51
52 return 0;
53}
54subsys_initcall(init_vdso);
55#endif
56
57struct linux_binprm; 42struct linux_binprm;
58 43
59/* Put the vdso above the (randomized) stack with another randomized offset. 44/* Put the vdso above the (randomized) stack with another randomized offset.
@@ -242,12 +227,9 @@ __setup("vdso=", vdso_setup);
242#endif 227#endif
243 228
244#ifdef CONFIG_X86_64 229#ifdef CONFIG_X86_64
245/* 230static void vgetcpu_cpu_init(void *arg)
246 * Assume __initcall executes before all user space. Hopefully kmod
247 * doesn't violate that. We'll find out if it does.
248 */
249static void vsyscall_set_cpu(int cpu)
250{ 231{
232 int cpu = smp_processor_id();
251 struct desc_struct d; 233 struct desc_struct d;
252 unsigned long node = 0; 234 unsigned long node = 0;
253#ifdef CONFIG_NUMA 235#ifdef CONFIG_NUMA
@@ -274,34 +256,34 @@ static void vsyscall_set_cpu(int cpu)
274 write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S); 256 write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
275} 257}
276 258
277static void cpu_vsyscall_init(void *arg)
278{
279 /* preemption should be already off */
280 vsyscall_set_cpu(raw_smp_processor_id());
281}
282
283static int 259static int
284cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg) 260vgetcpu_cpu_notifier(struct notifier_block *n, unsigned long action, void *arg)
285{ 261{
286 long cpu = (long)arg; 262 long cpu = (long)arg;
287 263
288 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) 264 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
289 smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1); 265 smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
290 266
291 return NOTIFY_DONE; 267 return NOTIFY_DONE;
292} 268}
293 269
294static int __init vsyscall_init(void) 270static int __init init_vdso(void)
295{ 271{
272 init_vdso_image(&vdso_image_64);
273
274#ifdef CONFIG_X86_X32_ABI
275 init_vdso_image(&vdso_image_x32);
276#endif
277
296 cpu_notifier_register_begin(); 278 cpu_notifier_register_begin();
297 279
298 on_each_cpu(cpu_vsyscall_init, NULL, 1); 280 on_each_cpu(vgetcpu_cpu_init, NULL, 1);
299 /* notifier priority > KVM */ 281 /* notifier priority > KVM */
300 __hotcpu_notifier(cpu_vsyscall_notifier, 30); 282 __hotcpu_notifier(vgetcpu_cpu_notifier, 30);
301 283
302 cpu_notifier_register_done(); 284 cpu_notifier_register_done();
303 285
304 return 0; 286 return 0;
305} 287}
306__initcall(vsyscall_init); 288subsys_initcall(init_vdso);
307#endif 289#endif /* CONFIG_X86_64 */