author     Andi Kleen <ak@suse.de>               2006-09-26 04:52:28 -0400
committer  Andi Kleen <andi@basil.nowhere.org>   2006-09-26 04:52:28 -0400
commit     3cfc348bf90ffaa777c188652aa297f04eb94de8
tree       8908d6a5a61e54ab422ec7f4800d6ac591695423
parent     c08c820508233b424deab3302bc404bbecc6493a
[PATCH] x86: Add portable getcpu call
For NUMA optimization and some other algorithms it is useful to have a fast
way to get the current CPU and node numbers in user space.
x86-64 added a fast way to do this in a vsyscall. This adds a generic
syscall for other architectures, making it a portable facility.
I expect some of them will also implement it as a faster vsyscall.
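As an illustration only (not part of this patch), a minimal user-space sketch of invoking the new call directly through syscall(2) on i386 could look like the following; the syscall number 318 matches the unistd.h change below, no glibc wrapper is assumed, and NULL is a valid cache argument for the generic implementation:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_getcpu
#define __NR_getcpu 318		/* i386 number added by this patch */
#endif

int main(void)
{
	unsigned cpu, node;

	/* Raw syscall; passing NULL for the cache is allowed by the
	   generic implementation in kernel/sys.c below. */
	if (syscall(__NR_getcpu, &cpu, &node, NULL) == 0)
		printf("running on cpu %u, node %u\n", cpu, node);
	else
		perror("getcpu");
	return 0;
}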
The cache argument is an optimization for the x86-64 vsyscall implementation. Since
what the syscall returns is an approximation anyway and user space
often wants very fast results, it can be cached for some time. The normal
methods to get this information in user space are relatively slow.
The vsyscall is in a better position to manage the cache because it has direct
access to a fast time stamp (jiffies). For the generic syscall
it doesn't help much, but we enforce a valid argument to keep programs
portable.
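Illustrative only: the sketch below shows roughly how a jiffies-keyed cache could be used by a vsyscall-style implementation. It is not the actual x86-64 vgetcpu code; the read_cpu_node_from_hw() helper, the bit packing of cpu/node, and the use of the t0/t1 fields are assumptions based on the generic code further below.

/* Sketch of a jiffies-keyed cache, not the real x86-64 vgetcpu. */
long vgetcpu_sketch(unsigned *cpu, unsigned *node,
		    struct getcpu_cache *cache)
{
	unsigned long j = jiffies;	/* cheap time stamp available to the vsyscall */
	unsigned int p;

	if (cache && cache->t0 == j) {
		/* Same jiffy as the last call: reuse the cached value. */
		p = cache->t1;
	} else {
		p = read_cpu_node_from_hw();	/* hypothetical helper (e.g. RDTSCP/GDT) */
		if (cache) {
			cache->t0 = j;
			cache->t1 = p;
		}
	}
	if (cpu)
		*cpu = p & 0xfff;	/* one possible packing: cpu in the low bits */
	if (node)
		*node = p >> 12;	/* node in the high bits */
	return 0;
}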
I only added an i386 syscall entry for now. Other architectures can follow
as needed.
AK: Also added some cleanups from Andrew Morton
Signed-off-by: Andi Kleen <ak@suse.de>
 arch/i386/kernel/syscall_table.S |  1 +
 arch/x86_64/ia32/ia32entry.S     |  1 +
 include/asm-i386/unistd.h        |  3 ++-
 include/linux/syscalls.h         |  2 ++
 kernel/sys.c                     | 31 +++++++++++++++++++++++++++++++
 5 files changed, 37 insertions(+), 1 deletion(-)
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S
index dd63d4775398..7e639f78b0b9 100644
--- a/arch/i386/kernel/syscall_table.S
+++ b/arch/i386/kernel/syscall_table.S
@@ -317,3 +317,4 @@ ENTRY(sys_call_table)
 	.long sys_tee			/* 315 */
 	.long sys_vmsplice
 	.long sys_move_pages
+	.long sys_getcpu
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index 30ed0f6f4a2b..32fd32bea07c 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -713,4 +713,5 @@ ia32_sys_call_table:
 	.quad sys_tee
 	.quad compat_sys_vmsplice
 	.quad compat_sys_move_pages
+	.quad sys_getcpu
 ia32_syscall_end:
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index fc1c8ddae149..565d0897b205 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -323,10 +323,11 @@
 #define __NR_tee		315
 #define __NR_vmsplice		316
 #define __NR_move_pages		317
+#define __NR_getcpu		318
 
 #ifdef __KERNEL__
 
-#define NR_syscalls 318
+#define NR_syscalls 319
 
 /*
  * user-visible error numbers are in the range -1 - -128: see
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 008f04c56737..3f0f716225ec 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -53,6 +53,7 @@ struct mq_attr;
 struct compat_stat;
 struct compat_timeval;
 struct robust_list_head;
+struct getcpu_cache;
 
 #include <linux/types.h>
 #include <linux/aio_abi.h>
@@ -596,5 +597,6 @@ asmlinkage long sys_get_robust_list(int pid,
 				size_t __user *len_ptr);
 asmlinkage long sys_set_robust_list(struct robust_list_head __user *head,
 				    size_t len);
+asmlinkage long sys_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *cache);
 
 #endif
diff --git a/kernel/sys.c b/kernel/sys.c
index e236f98f7ec5..3f894775488d 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -28,6 +28,7 @@
 #include <linux/tty.h>
 #include <linux/signal.h>
 #include <linux/cn_proc.h>
+#include <linux/getcpu.h>
 
 #include <linux/compat.h>
 #include <linux/syscalls.h>
@@ -2062,3 +2063,33 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
 	}
 	return error;
 }
+
+asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
+			   struct getcpu_cache __user *cache)
+{
+	int err = 0;
+	int cpu = raw_smp_processor_id();
+	if (cpup)
+		err |= put_user(cpu, cpup);
+	if (nodep)
+		err |= put_user(cpu_to_node(cpu), nodep);
+	if (cache) {
+		/*
+		 * The cache is not needed for this implementation,
+		 * but make sure user programs pass something
+		 * valid. vsyscall implementations can instead make
+		 * good use of the cache. Only use t0 and t1 because
+		 * these are available in both 32bit and 64bit ABI (no
+		 * need for a compat_getcpu). 32bit has enough
+		 * padding
+		 */
+		unsigned long t0, t1;
+		get_user(t0, &cache->t0);
+		get_user(t1, &cache->t1);
+		t0++;
+		t1++;
+		put_user(t0, &cache->t0);
+		put_user(t1, &cache->t1);
+	}
+	return err ? -EFAULT : 0;
+}