author     Andi Kleen <ak@suse.de>  2006-09-26 04:52:28 -0400
committer  Andi Kleen <andi@basil.nowhere.org>  2006-09-26 04:52:28 -0400
commit     3cfc348bf90ffaa777c188652aa297f04eb94de8 (patch)
tree       8908d6a5a61e54ab422ec7f4800d6ac591695423 /kernel
parent     c08c820508233b424deab3302bc404bbecc6493a (diff)
[PATCH] x86: Add portable getcpu call
For NUMA optimization and some other algorithms it is useful to have a fast way to get the current CPU and node numbers in user space. x86-64 added a fast way to do this in a vsyscall; this adds a generic syscall for other architectures to make it a generic, portable facility. I expect some of them will also implement it as a faster vsyscall.

The cache argument is an optimization for the x86-64 vsyscall implementation. Since what the syscall returns is an approximation anyway, and user space often wants very fast results, it can be cached for some time. The normal methods to get this information in user space are relatively slow. The vsyscall is in a better position to manage the cache because it has direct access to a fast time stamp (jiffies). For the generic syscall the cache doesn't help much, but a valid argument is still enforced to keep programs portable.

I only added an i386 syscall entry for now. Other architectures can follow as needed.

AK: Also added some cleanups from Andrew Morton

Signed-off-by: Andi Kleen <ak@suse.de>
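As a quick illustration of how user space would consume the new facility, here is a minimal sketch that invokes the syscall directly via syscall(2). It assumes a C library that exposes SYS_getcpu in <sys/syscall.h>, which is not part of this patch; the cache argument is passed as NULL since the generic implementation only validates it.

/* Minimal userspace sketch (assumption: libc defines SYS_getcpu). */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	unsigned cpu, node;

	/* cpup, nodep, and cache may each be NULL; here we skip the cache. */
	if (syscall(SYS_getcpu, &cpu, &node, NULL) == -1) {
		perror("getcpu");
		return 1;
	}
	printf("running on cpu %u, node %u\n", cpu, node);
	return 0;
}

Note that the returned values are only a hint: as the commit message says, the result is an approximation, and the thread can be migrated to another CPU immediately after the call returns.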
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sys.c  31
1 file changed, 31 insertions(+), 0 deletions(-)
diff --git a/kernel/sys.c b/kernel/sys.c
index e236f98f7ec5..3f894775488d 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -28,6 +28,7 @@
 #include <linux/tty.h>
 #include <linux/signal.h>
 #include <linux/cn_proc.h>
+#include <linux/getcpu.h>
 
 #include <linux/compat.h>
 #include <linux/syscalls.h>
@@ -2062,3 +2063,33 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
 	}
 	return error;
 }
+
+asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
+			   struct getcpu_cache __user *cache)
+{
+	int err = 0;
+	int cpu = raw_smp_processor_id();
+	if (cpup)
+		err |= put_user(cpu, cpup);
+	if (nodep)
+		err |= put_user(cpu_to_node(cpu), nodep);
+	if (cache) {
+		/*
+		 * The cache is not needed for this implementation,
+		 * but make sure user programs pass something
+		 * valid. vsyscall implementations can instead make
+		 * good use of the cache. Only use t0 and t1 because
+		 * these are available in both 32bit and 64bit ABI (no
+		 * need for a compat_getcpu). 32bit has enough
+		 * padding.
+		 */
+		unsigned long t0, t1;
+		get_user(t0, &cache->t0);
+		get_user(t1, &cache->t1);
+		t0++;
+		t1++;
+		put_user(t0, &cache->t0);
+		put_user(t1, &cache->t1);
+	}
+	return err ? -EFAULT : 0;
+}
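The struct getcpu_cache dereferenced above is declared in the new include/linux/getcpu.h header, which does not appear in this diffstat (limited to 'kernel'). A plausible layout consistent with the comment, which only touches t0 and t1 and relies on extra padding so the same structure works for 32-bit and 64-bit ABIs, is sketched below; the reserved field and its size are assumptions here, not taken from the patch.

/* Sketch only: layout implied by the code above. What a vsyscall
 * implementation actually stores in t0/t1 (for example a jiffies
 * timestamp and cached cpu/node values) is up to the architecture.
 */
struct getcpu_cache {
	unsigned long t0;
	unsigned long t1;
	unsigned long res[4];	/* assumed padding/reserve, see note above */
};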