author	Andy Lutomirski <luto@amacapital.net>	2014-09-23 13:50:52 -0400
committer	Ingo Molnar <mingo@kernel.org>	2014-10-28 06:22:09 -0400
commit	d4f829dd9026797bd5db8715a30192f23b22afaa (patch)
tree	e73421df7744004de3c55f814b42d2702ccf50ab
parent	b93590901a01a6d036b3b7c856bcc5724fdb9911 (diff)
x86_64/vdso: Move getcpu code from vsyscall_64.c to vdso/vma.c
This is pure cut-and-paste.  At this point, vsyscall_64.c contains
only code needed for vsyscall emulation, but some of the comments
and function names are still confused.

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Link: http://lkml.kernel.org/r/a244daf7d3cbe71afc08ad09fdfe1866ca1f1978.1411494540.git.luto@amacapital.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/x86/kernel/vsyscall_64.c	57
-rw-r--r--	arch/x86/vdso/vma.c	61
2 files changed, 61 insertions(+), 57 deletions(-)
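For context on what is being moved: vsyscall_set_cpu() caches the CPU and NUMA node numbers in the segment limit of a per-CPU GDT entry (and in TSC_AUX where RDTSCP is available), so user space can recover them without a syscall. A rough sketch of the consumer side, approximating what the vDSO's vgetcpu does; the selector constant below is illustrative, not the kernel's exact definition:

/* Sketch of the vgetcpu fast path this code supports; assumes the
 * historical GDT_ENTRY_PER_CPU index of 15 (illustrative only). */
#define PER_CPU_SELECTOR ((15 * 8) | 3)	/* GDT index 15, RPL 3 */

static inline void vgetcpu_sketch(unsigned *cpu, unsigned *node)
{
	unsigned int p;

	/* LSL reads the segment limit -- the value packed by
	 * vsyscall_set_cpu() -- from user mode, no syscall needed. */
	asm("lsl %1, %0" : "=r" (p) : "r" (PER_CPU_SELECTOR));
	if (cpu)
		*cpu = p & 0xfff;	/* low 12 bits: CPU number */
	if (node)
		*node = p >> 12;	/* remaining bits: node number */
}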
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 521d5ed19547..2f9ef0c1d112 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -332,49 +332,6 @@ int in_gate_area_no_mm(unsigned long addr)
 	return (addr & PAGE_MASK) == VSYSCALL_ADDR;
 }
 
-/*
- * Assume __initcall executes before all user space. Hopefully kmod
- * doesn't violate that. We'll find out if it does.
- */
-static void vsyscall_set_cpu(int cpu)
-{
-	unsigned long d;
-	unsigned long node = 0;
-#ifdef CONFIG_NUMA
-	node = cpu_to_node(cpu);
-#endif
-	if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
-		write_rdtscp_aux((node << 12) | cpu);
-
-	/*
-	 * Store cpu number in limit so that it can be loaded quickly
-	 * in user space in vgetcpu. (12 bits for the CPU and 8 bits for the node)
-	 */
-	d = 0x0f40000000000ULL;
-	d |= cpu;
-	d |= (node & 0xf) << 12;
-	d |= (node >> 4) << 48;
-
-	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
-}
-
-static void cpu_vsyscall_init(void *arg)
-{
-	/* preemption should be already off */
-	vsyscall_set_cpu(raw_smp_processor_id());
-}
-
-static int
-cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
-{
-	long cpu = (long)arg;
-
-	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
-		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);
-
-	return NOTIFY_DONE;
-}
-
 void __init map_vsyscall(void)
 {
 	extern char __vsyscall_page;
@@ -387,17 +344,3 @@ void __init map_vsyscall(void)
 	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
 		     (unsigned long)VSYSCALL_ADDR);
 }
-
-static int __init vsyscall_init(void)
-{
-	cpu_notifier_register_begin();
-
-	on_each_cpu(cpu_vsyscall_init, NULL, 1);
-	/* notifier priority > KVM */
-	__hotcpu_notifier(cpu_vsyscall_notifier, 30);
-
-	cpu_notifier_register_done();
-
-	return 0;
-}
-__initcall(vsyscall_init);
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 970463b566cf..a155dca5edb5 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -10,12 +10,14 @@
 #include <linux/init.h>
 #include <linux/random.h>
 #include <linux/elf.h>
+#include <linux/cpu.h>
 #include <asm/vsyscall.h>
 #include <asm/vgtod.h>
 #include <asm/proto.h>
 #include <asm/vdso.h>
 #include <asm/page.h>
 #include <asm/hpet.h>
+#include <asm/desc.h>
 
 #if defined(CONFIG_X86_64)
 unsigned int __read_mostly vdso64_enabled = 1;
@@ -238,3 +240,62 @@ static __init int vdso_setup(char *s)
 }
 __setup("vdso=", vdso_setup);
 #endif
+
+#ifdef CONFIG_X86_64
+/*
+ * Assume __initcall executes before all user space. Hopefully kmod
+ * doesn't violate that. We'll find out if it does.
+ */
+static void vsyscall_set_cpu(int cpu)
+{
+	unsigned long d;
+	unsigned long node = 0;
+#ifdef CONFIG_NUMA
+	node = cpu_to_node(cpu);
+#endif
+	if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
+		write_rdtscp_aux((node << 12) | cpu);
+
+	/*
+	 * Store cpu number in limit so that it can be loaded quickly
+	 * in user space in vgetcpu. (12 bits for the CPU and 8 bits for the node)
+	 */
+	d = 0x0f40000000000ULL;
+	d |= cpu;
+	d |= (node & 0xf) << 12;
+	d |= (node >> 4) << 48;
+
+	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
+}
+
+static void cpu_vsyscall_init(void *arg)
+{
+	/* preemption should be already off */
+	vsyscall_set_cpu(raw_smp_processor_id());
+}
+
+static int
+cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
+{
+	long cpu = (long)arg;
+
+	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
+		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);
+
+	return NOTIFY_DONE;
+}
+
+static int __init vsyscall_init(void)
+{
+	cpu_notifier_register_begin();
+
+	on_each_cpu(cpu_vsyscall_init, NULL, 1);
+	/* notifier priority > KVM */
+	__hotcpu_notifier(cpu_vsyscall_notifier, 30);
+
+	cpu_notifier_register_done();
+
+	return 0;
+}
+__initcall(vsyscall_init);
+#endif
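One non-obvious detail in the moved code is the split of the node bits: an x86 segment descriptor stores its 20-bit limit in two pieces (descriptor bits 0-15 and bits 48-51), so only the low four node bits fit directly above the 12 CPU bits. A minimal, self-contained sketch of the round trip, with hypothetical helper names; unpack_limit() recovers the limit the way the CPU does when it evaluates LSL:

#include <assert.h>

/* Pack cpu/node the way vsyscall_set_cpu() does (payload bits only,
 * omitting the type/DPL/present bits in 0x0f40000000000ULL). */
static unsigned long pack_limit(unsigned cpu, unsigned node)
{
	unsigned long d = 0;

	d |= cpu;				/* limit bits 0-11  */
	d |= (unsigned long)(node & 0xf) << 12;	/* limit bits 12-15 */
	d |= (unsigned long)(node >> 4) << 48;	/* limit bits 16-19 */
	return d;
}

/* Reassemble the 20-bit limit from its two descriptor fields. */
static unsigned unpack_limit(unsigned long d)
{
	return (d & 0xffff) | (((d >> 48) & 0xf) << 16);
}

int main(void)
{
	unsigned limit = unpack_limit(pack_limit(3, 0x21));

	assert((limit & 0xfff) == 3);	/* cpu  */
	assert((limit >> 12) == 0x21);	/* node */
	return 0;
}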