 arch/x86/vdso/vgetcpu.c | 19 ++-----------------
 kernel/sys.c            | 20 +-------------------
 2 files changed, 3 insertions(+), 36 deletions(-)
diff --git a/arch/x86/vdso/vgetcpu.c b/arch/x86/vdso/vgetcpu.c
index 91f6e85d0fc2..3b1ae1abfba9 100644
--- a/arch/x86/vdso/vgetcpu.c
+++ b/arch/x86/vdso/vgetcpu.c
@@ -13,32 +13,17 @@
 #include <asm/vgtod.h>
 #include "vextern.h"
 
-long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
+long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
 {
 	unsigned int dummy, p;
-	unsigned long j = 0;
 
-	/* Fast cache - only recompute value once per jiffies and avoid
-	   relatively costly rdtscp/cpuid otherwise.
-	   This works because the scheduler usually keeps the process
-	   on the same CPU and this syscall doesn't guarantee its
-	   results anyways.
-	   We do this here because otherwise user space would do it on
-	   its own in a likely inferior way (no access to jiffies).
-	   If you don't like it pass NULL. */
-	if (tcache && tcache->blob[0] == (j = *vdso_jiffies)) {
-		p = tcache->blob[1];
-	} else if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) {
+	if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) {
 		/* Load per CPU data from RDTSCP */
 		rdtscp(dummy, dummy, p);
 	} else {
 		/* Load per CPU data from GDT */
 		asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
 	}
-	if (tcache) {
-		tcache->blob[0] = j;
-		tcache->blob[1] = p;
-	}
 	if (cpu)
 		*cpu = p & 0xfff;
 	if (node)
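
Note (not part of the patch): since the tcache/cache argument is now ignored, user space can simply pass NULL as the third argument to getcpu(). Below is a minimal sketch of a caller, assuming glibc's syscall(2) wrapper and SYS_getcpu; it goes through the plain syscall rather than the vDSO fast path, but the argument ABI is the same.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	unsigned int cpu, node;

	/* The third (cache) argument is ignored after this change; pass NULL. */
	if (syscall(SYS_getcpu, &cpu, &node, NULL) == 0)
		printf("running on cpu %u, node %u\n", cpu, node);
	else
		perror("getcpu");
	return 0;
}
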
diff --git a/kernel/sys.c b/kernel/sys.c
index 304b5410d746..d1fe71eb4546 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1750,7 +1750,7 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
 }
 
 asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
-			   struct getcpu_cache __user *cache)
+			   struct getcpu_cache __user *unused)
 {
 	int err = 0;
 	int cpu = raw_smp_processor_id();
@@ -1758,24 +1758,6 @@ asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
 	err |= put_user(cpu, cpup);
 	if (nodep)
 		err |= put_user(cpu_to_node(cpu), nodep);
-	if (cache) {
-		/*
-		 * The cache is not needed for this implementation,
-		 * but make sure user programs pass something
-		 * valid. vsyscall implementations can instead make
-		 * good use of the cache. Only use t0 and t1 because
-		 * these are available in both 32bit and 64bit ABI (no
-		 * need for a compat_getcpu). 32bit has enough
-		 * padding
-		 */
-		unsigned long t0, t1;
-		get_user(t0, &cache->blob[0]);
-		get_user(t1, &cache->blob[1]);
-		t0++;
-		t1++;
-		put_user(t0, &cache->blob[0]);
-		put_user(t1, &cache->blob[1]);
-	}
 	return err ? -EFAULT : 0;
 }
 
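Note (not part of the patch): the usual consumer of this path is glibc's sched_getcpu(), which on x86-64 typically resolves to the __vdso_getcpu() routine shown above, so the common case never enters the kernel. A small sketch, assuming _GNU_SOURCE and <sched.h>:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	int cpu = sched_getcpu();	/* RDTSCP or GDT/LSL path via the vDSO on x86-64 */

	if (cpu < 0)
		perror("sched_getcpu");
	else
		printf("running on cpu %d\n", cpu);
	return 0;
}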