Diffstat (limited to 'arch/x86/vdso/vgetcpu.c')
 arch/x86/vdso/vgetcpu.c | 19 ++-----------------
 1 file changed, 2 insertions(+), 17 deletions(-)
diff --git a/arch/x86/vdso/vgetcpu.c b/arch/x86/vdso/vgetcpu.c
index 91f6e85d0fc2..3b1ae1abfba9 100644
--- a/arch/x86/vdso/vgetcpu.c
+++ b/arch/x86/vdso/vgetcpu.c
@@ -13,32 +13,17 @@
 #include <asm/vgtod.h>
 #include "vextern.h"
 
-long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
+long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
 {
 	unsigned int dummy, p;
-	unsigned long j = 0;
 
-	/* Fast cache - only recompute value once per jiffies and avoid
-	   relatively costly rdtscp/cpuid otherwise.
-	   This works because the scheduler usually keeps the process
-	   on the same CPU and this syscall doesn't guarantee its
-	   results anyways.
-	   We do this here because otherwise user space would do it on
-	   its own in a likely inferior way (no access to jiffies).
-	   If you don't like it pass NULL. */
-	if (tcache && tcache->blob[0] == (j = *vdso_jiffies)) {
-		p = tcache->blob[1];
-	} else if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) {
+	if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) {
 		/* Load per CPU data from RDTSCP */
 		rdtscp(dummy, dummy, p);
 	} else {
 		/* Load per CPU data from GDT */
 		asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
 	}
-	if (tcache) {
-		tcache->blob[0] = j;
-		tcache->blob[1] = p;
-	}
 	if (cpu)
 		*cpu = p & 0xfff;
 	if (node)
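
A note on what this function unpacks: both RDTSCP (which copies IA32_TSC_AUX into ECX) and the LSL segment-limit trick yield a value p with the CPU number packed into its low 12 bits, hence the p & 0xfff mask; the NUMA node is kept in the bits above. For illustration only, here is a minimal userspace sketch, assuming a glibc system: sched_getcpu() is normally serviced by this vDSO routine on x86-64, and since the cache argument removed here is ignored by the kernel nowadays, NULL is the natural thing to pass to the raw syscall.

/* Userspace sketch (not part of the patch): both calls below end up in
 * getcpu; the per-thread cache argument this patch stops honoring is
 * simply passed as NULL. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sched.h>		/* sched_getcpu() */
#include <unistd.h>		/* syscall() */
#include <sys/syscall.h>	/* SYS_getcpu */

int main(void)
{
	int cpu = sched_getcpu();	/* fast path, typically via the vDSO */
	if (cpu < 0) {
		perror("sched_getcpu");
		return 1;
	}

	unsigned c = 0, n = 0;
	/* Raw syscall for comparison; the third (cache) argument is unused. */
	if (syscall(SYS_getcpu, &c, &n, NULL) != 0) {
		perror("getcpu");
		return 1;
	}

	printf("sched_getcpu=%d  SYS_getcpu: cpu=%u node=%u\n", cpu, c, n);
	return 0;
}

Dropping the cache matches the parameter rename to 'unused' in the hunk above: callers that previously passed a getcpu_cache to amortize the rdtscp/LSL cost no longer get caching, but they also no longer risk a stale CPU number held over from a previous jiffy.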