author		Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>	2007-05-08 03:34:40 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-08 14:15:20 -0400
commit		5e97b9309baa76b476ec7e0d6e9c097edeb4142c (patch)
tree		f36f3410ce537babc60025ea499970d148c69b14 /include
parent		2856f5e31c1413bf6e4f1371e07e17078a5fee5e (diff)
local_t: architecture independent extension
This series extends and standardises local_t operations on each architecture, allowing a rich set of atomic operations to be done on per-cpu data with minimal performance impact. On architectures where there seems to be no difference between the SMP and UP operation (same memory barriers, same LOCKing), local.h simply includes asm-generic/local.h, which removes duplicated code from the current kernel tree.

This patch:

local_t: architecture independent extension

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
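As context for reviewers, here is a minimal usage sketch (not part of the patch) of the extended local_t API: a per-cpu event counter. The variable hypothetical_events and both helper functions are made up for illustration, and the callers are assumed to run with preemption disabled (or in interrupt context), as local_t requires.

#include <linux/percpu.h>
#include <asm/local.h>

/* Hypothetical per-cpu counter, for illustration only. */
static DEFINE_PER_CPU(local_t, hypothetical_events) = LOCAL_INIT(0);

/* Caller must have preemption disabled (e.g. interrupt context). */
static void record_event(void)
{
	/* Cheap per-cpu increment; architectures with an optimized
	 * local.h can do this without a LOCK-prefixed instruction. */
	local_inc(&__get_cpu_var(hypothetical_events));
}

static long read_and_clear_events(void)
{
	/* local_xchg() is one of the ops this patch adds to the
	 * generic header: fetch the count and reset it, atomically
	 * with respect to this CPU. */
	return local_xchg(&__get_cpu_var(hypothetical_events), 0);
}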
Diffstat (limited to 'include')
-rw-r--r--	include/asm-generic/local.h	33
1 file changed, 23 insertions(+), 10 deletions(-)
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
index ab469297272c..33d7d04e4119 100644
--- a/include/asm-generic/local.h
+++ b/include/asm-generic/local.h
@@ -33,6 +33,19 @@ typedef struct
 #define local_add(i,l)	atomic_long_add((i),(&(l)->a))
 #define local_sub(i,l)	atomic_long_sub((i),(&(l)->a))
 
+#define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
+#define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
+#define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
+#define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
+#define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
+#define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
+#define local_inc_return(l) atomic_long_inc_return(&(l)->a)
+
+#define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
+#define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
+#define local_add_unless(l, a, u) atomic_long_add_unless((&(l)->a), (a), (u))
+#define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
+
 /* Non-atomic variants, ie. preemption disabled and won't be touched
  * in interrupt, etc. Some archs can optimize this case well. */
 #define __local_inc(l)	local_set((l), local_read(l) + 1)
@@ -44,19 +57,19 @@ typedef struct
  * much more efficient than these naive implementations. Note they take
  * a variable (eg. mystruct.foo), not an address.
  */
-#define cpu_local_read(v)	local_read(&__get_cpu_var(v))
-#define cpu_local_set(v, i)	local_set(&__get_cpu_var(v), (i))
-#define cpu_local_inc(v)	local_inc(&__get_cpu_var(v))
-#define cpu_local_dec(v)	local_dec(&__get_cpu_var(v))
-#define cpu_local_add(i, v)	local_add((i), &__get_cpu_var(v))
-#define cpu_local_sub(i, v)	local_sub((i), &__get_cpu_var(v))
+#define cpu_local_read(l)	local_read(&__get_cpu_var(l))
+#define cpu_local_set(l, i)	local_set(&__get_cpu_var(l), (i))
+#define cpu_local_inc(l)	local_inc(&__get_cpu_var(l))
+#define cpu_local_dec(l)	local_dec(&__get_cpu_var(l))
+#define cpu_local_add(i, l)	local_add((i), &__get_cpu_var(l))
+#define cpu_local_sub(i, l)	local_sub((i), &__get_cpu_var(l))
 
 /* Non-atomic increments, ie. preemption disabled and won't be touched
  * in interrupt, etc. Some archs can optimize this case well.
  */
-#define __cpu_local_inc(v)	__local_inc(&__get_cpu_var(v))
-#define __cpu_local_dec(v)	__local_dec(&__get_cpu_var(v))
-#define __cpu_local_add(i, v)	__local_add((i), &__get_cpu_var(v))
-#define __cpu_local_sub(i, v)	__local_sub((i), &__get_cpu_var(v))
+#define __cpu_local_inc(l)	__local_inc(&__get_cpu_var(l))
+#define __cpu_local_dec(l)	__local_dec(&__get_cpu_var(l))
+#define __cpu_local_add(i, l)	__local_add((i), &__get_cpu_var(l))
+#define __cpu_local_sub(i, l)	__local_sub((i), &__get_cpu_var(l))
 
 #endif /* _ASM_GENERIC_LOCAL_H */
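For the cpu_local_* wrappers touched by the second hunk, a similar sketch (again not part of the patch); as the comment above them notes, they take the per-cpu variable itself, not an address. The variable and function names here are hypothetical.

#include <linux/percpu.h>
#include <asm/local.h>

/* Hypothetical per-cpu hit counter, for illustration only. */
static DEFINE_PER_CPU(local_t, hypothetical_hits) = LOCAL_INIT(0);

static void count_hit(void)
{
	/* Pass the bare variable name: the wrapper expands to
	 * local_inc(&__get_cpu_var(hypothetical_hits)). */
	cpu_local_inc(hypothetical_hits);
}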