Diffstat (limited to 'arch/x86/kernel/vsyscall_64.c')
-rw-r--r-- | arch/x86/kernel/vsyscall_64.c | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index ad4005c6d4a1..3f8242774580 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -43,7 +43,7 @@
 #include <asm/vgtod.h>
 
 #define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
-#define __syscall_clobber "r11","rcx","memory"
+#define __syscall_clobber "r11","cx","memory"
 #define __pa_vsymbol(x) \
 ({unsigned long v; \
 	extern char __vsyscall_0; \
@@ -190,7 +190,7 @@ time_t __vsyscall(1) vtime(time_t *t)
 long __vsyscall(2)
 vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
 {
-	unsigned int dummy, p;
+	unsigned int p;
 	unsigned long j = 0;
 
 	/* Fast cache - only recompute value once per jiffies and avoid
@@ -205,7 +205,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
 		p = tcache->blob[1];
 	} else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
 		/* Load per CPU data from RDTSCP */
-		rdtscp(dummy, dummy, p);
+		native_read_tscp(&p);
 	} else {
 		/* Load per CPU data from GDT */
 		asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
@@ -297,7 +297,7 @@ static void __cpuinit vsyscall_set_cpu(int cpu)
 	/* Store cpu number in limit so that it can be loaded quickly
 	   in user space in vgetcpu.
 	   12 bits for the CPU and 8 bits for the node. */
-	d = (unsigned long *)(cpu_gdt(cpu) + GDT_ENTRY_PER_CPU);
+	d = (unsigned long *)(get_cpu_gdt_table(cpu) + GDT_ENTRY_PER_CPU);
 	*d = 0x0f40000000000ULL;
 	*d |= cpu;
 	*d |= (node & 0xf) << 12;
@@ -319,7 +319,7 @@ cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
 	return NOTIFY_DONE;
 }
 
-static void __init map_vsyscall(void)
+void __init map_vsyscall(void)
 {
 	extern char __vsyscall_0;
 	unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
@@ -335,7 +335,6 @@ static int __init vsyscall_init(void)
 	BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
 	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
 	BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
-	map_vsyscall();
 #ifdef CONFIG_SYSCTL
 	register_sysctl_table(kernel_root_table2);
 #endif
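
For context on the RDTSCP hunk above, here is a minimal, illustrative sketch (not part of this commit; the helper names read_tscp() and decode_cpu_node() are invented for illustration) of roughly what the kernel's native_read_tscp() does, and how a vgetcpu-style caller can split the returned per-CPU value into CPU and node numbers, assuming the low-12-bits-CPU / upper-bits-node encoding that vsyscall_set_cpu() stores in the segment limit.

/*
 * Illustrative sketch only. RDTSCP returns the TSC in EDX:EAX and the
 * contents of the IA32_TSC_AUX MSR (where the kernel stashes the
 * cpu/node encoding) in ECX; native_read_tscp() works along these lines.
 */
static inline unsigned long long read_tscp(unsigned int *aux)
{
	unsigned int low, high;

	/* 0f 01 f9 is the RDTSCP opcode (older assemblers lack the mnemonic) */
	asm volatile(".byte 0x0f,0x01,0xf9"
		     : "=a" (low), "=d" (high), "=c" (*aux));
	return low | ((unsigned long long)high << 32);
}

/*
 * Hypothetical decode helper: the low 12 bits carry the CPU number and the
 * bits above that the node, matching the layout vsyscall_set_cpu() writes.
 */
static inline void decode_cpu_node(unsigned int p, unsigned *cpu, unsigned *node)
{
	if (cpu)
		*cpu = p & 0xfff;
	if (node)
		*node = p >> 12;
}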