diff options
Diffstat (limited to 'arch/x86_64/kernel/vsyscall.c')
-rw-r--r--  arch/x86_64/kernel/vsyscall.c | 84 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---
1 files changed, 82 insertions(+), 2 deletions(-)
diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c
index f603037df16..902783bc4d5 100644
--- a/arch/x86_64/kernel/vsyscall.c
+++ b/arch/x86_64/kernel/vsyscall.c
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/seqlock.h> | 26 | #include <linux/seqlock.h> |
27 | #include <linux/jiffies.h> | 27 | #include <linux/jiffies.h> |
28 | #include <linux/sysctl.h> | 28 | #include <linux/sysctl.h> |
29 | #include <linux/getcpu.h> | ||
29 | 30 | ||
30 | #include <asm/vsyscall.h> | 31 | #include <asm/vsyscall.h> |
31 | #include <asm/pgtable.h> | 32 | #include <asm/pgtable.h> |
@@ -33,11 +34,15 @@ | |||
33 | #include <asm/fixmap.h> | 34 | #include <asm/fixmap.h> |
34 | #include <asm/errno.h> | 35 | #include <asm/errno.h> |
35 | #include <asm/io.h> | 36 | #include <asm/io.h> |
37 | #include <asm/segment.h> | ||
38 | #include <asm/desc.h> | ||
39 | #include <asm/topology.h> | ||
36 | 40 | ||
37 | #define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr))) | 41 | #define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr))) |
38 | 42 | ||
39 | int __sysctl_vsyscall __section_sysctl_vsyscall = 1; | 43 | int __sysctl_vsyscall __section_sysctl_vsyscall = 1; |
40 | seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED; | 44 | seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED; |
45 | int __vgetcpu_mode __section_vgetcpu_mode; | ||
41 | 46 | ||
42 | #include <asm/unistd.h> | 47 | #include <asm/unistd.h> |
43 | 48 | ||
@@ -127,9 +132,46 @@ time_t __vsyscall(1) vtime(time_t *t) | |||
127 | return __xtime.tv_sec; | 132 | return __xtime.tv_sec; |
128 | } | 133 | } |
129 | 134 | ||
130 | long __vsyscall(2) venosys_0(void) | 135 | /* Fast way to get current CPU and node. |
136 | This helps to do per node and per CPU caches in user space. | ||
137 | The result is not guaranteed without CPU affinity, but usually | ||
138 | works out because the scheduler tries to keep a thread on the same | ||
139 | CPU. | ||
140 | |||
141 | tcache must point to a two element sized long array. | ||
142 | All arguments can be NULL. */ | ||
143 | long __vsyscall(2) | ||
144 | vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache) | ||
131 | { | 145 | { |
132 | return -ENOSYS; | 146 | unsigned int dummy, p; |
147 | unsigned long j = 0; | ||
148 | |||
149 | /* Fast cache - only recompute value once per jiffies and avoid | ||
150 | relatively costly rdtscp/cpuid otherwise. | ||
151 | This works because the scheduler usually keeps the process | ||
152 | on the same CPU and this syscall doesn't guarantee its | ||
153 | results anyways. | ||
154 | We do this here because otherwise user space would do it on | ||
155 | its own in a likely inferior way (no access to jiffies). | ||
156 | If you don't like it pass NULL. */ | ||
157 | if (tcache && tcache->t0 == (j = __jiffies)) { | ||
158 | p = tcache->t1; | ||
159 | } else if (__vgetcpu_mode == VGETCPU_RDTSCP) { | ||
160 | /* Load per CPU data from RDTSCP */ | ||
161 | rdtscp(dummy, dummy, p); | ||
162 | } else { | ||
163 | /* Load per CPU data from GDT */ | ||
164 | asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG)); | ||
165 | } | ||
166 | if (tcache) { | ||
167 | tcache->t0 = j; | ||
168 | tcache->t1 = p; | ||
169 | } | ||
170 | if (cpu) | ||
171 | *cpu = p & 0xfff; | ||
172 | if (node) | ||
173 | *node = p >> 12; | ||
174 | return 0; | ||
133 | } | 175 | } |
134 | 176 | ||
135 | long __vsyscall(3) venosys_1(void) | 177 | long __vsyscall(3) venosys_1(void) |
@@ -200,6 +242,43 @@ static ctl_table kernel_root_table2[] = { | |||
200 | 242 | ||
201 | #endif | 243 | #endif |
202 | 244 | ||
245 | static void __cpuinit write_rdtscp_cb(void *info) | ||
246 | { | ||
247 | write_rdtscp_aux((unsigned long)info); | ||
248 | } | ||
249 | |||
250 | void __cpuinit vsyscall_set_cpu(int cpu) | ||
251 | { | ||
252 | unsigned long *d; | ||
253 | unsigned long node = 0; | ||
254 | #ifdef CONFIG_NUMA | ||
255 | node = cpu_to_node[cpu]; | ||
256 | #endif | ||
257 | if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP)) { | ||
258 | void *info = (void *)((node << 12) | cpu); | ||
259 | /* Can happen on preemptive kernel */ | ||
260 | if (get_cpu() == cpu) | ||
261 | write_rdtscp_cb(info); | ||
262 | #ifdef CONFIG_SMP | ||
263 | else { | ||
264 | /* the notifier is unfortunately not executed on the | ||
265 | target CPU */ | ||
266 | smp_call_function_single(cpu,write_rdtscp_cb,info,0,1); | ||
267 | } | ||
268 | #endif | ||
269 | put_cpu(); | ||
270 | } | ||
271 | |||
272 | /* Store cpu number in limit so that it can be loaded quickly | ||
273 | in user space in vgetcpu. | ||
274 | 12 bits for the CPU and 8 bits for the node. */ | ||
275 | d = (unsigned long *)(cpu_gdt(cpu) + GDT_ENTRY_PER_CPU); | ||
276 | *d = 0x0f40000000000ULL; | ||
277 | *d |= cpu; | ||
278 | *d |= (node & 0xf) << 12; | ||
279 | *d |= (node >> 4) << 48; | ||
280 | } | ||
281 | |||
203 | static void __init map_vsyscall(void) | 282 | static void __init map_vsyscall(void) |
204 | { | 283 | { |
205 | extern char __vsyscall_0; | 284 | extern char __vsyscall_0; |
@@ -214,6 +293,7 @@ static int __init vsyscall_init(void) | |||
214 | VSYSCALL_ADDR(__NR_vgettimeofday))); | 293 | VSYSCALL_ADDR(__NR_vgettimeofday))); |
215 | BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime)); | 294 | BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime)); |
216 | BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE))); | 295 | BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE))); |
296 | BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu)); | ||
217 | map_vsyscall(); | 297 | map_vsyscall(); |
218 | #ifdef CONFIG_SYSCTL | 298 | #ifdef CONFIG_SYSCTL |
219 | register_sysctl_table(kernel_root_table2, 0); | 299 | register_sysctl_table(kernel_root_table2, 0); |