author		Linus Torvalds <torvalds@linux-foundation.org>	2010-03-03 10:34:18 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-03-03 10:34:18 -0500
commit		0a135ba14d71fb84c691a5386aff5049691fe6d7 (patch)
tree		adb1de887dd6839d69d2fc16ffa2a10ff63298fa /arch/alpha/include/asm
parent		4850f524b2c4c8a4e9f8ef4dd9c7c4afde2f2b2c (diff)
parent		a29d8b8e2d811a24bbe49215a0f0c536b72ebc18 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu:
  percpu: add __percpu sparse annotations to what's left
  percpu: add __percpu sparse annotations to fs
  percpu: add __percpu sparse annotations to core kernel subsystems
  local_t: Remove leftover local.h
  this_cpu: Remove pageset_notifier
  this_cpu: Page allocator conversion
  percpu, x86: Generic inc / dec percpu instructions
  local_t: Move local.h include to ringbuffer.c and ring_buffer_benchmark.c
  module: Use this_cpu_xx to dynamically allocate counters
  local_t: Remove cpu_local_xx macros
  percpu: refactor the code in pcpu_[de]populate_chunk()
  percpu: remove compile warnings caused by __verify_pcpu_ptr()
  percpu: make accessors check for percpu pointer in sparse
  percpu: add __percpu for sparse.
  percpu: make access macros universal
  percpu: remove per_cpu__ prefix.
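The shortlog centres on two changes: per-CPU data gains the __percpu sparse annotation so direct dereferences can be flagged, and the per_cpu__ prefix and cpu_local_xx macros give way to the generic this_cpu_* accessors. A minimal sketch of the resulting style follows; the struct and field names are invented for illustration and are not taken from the merged patches:

	#include <linux/percpu.h>
	#include <linux/errno.h>

	struct stats {
		unsigned long packets;
		unsigned long bytes;
	};

	/* __percpu marks the pointer for sparse; it may only be
	 * dereferenced through the per-CPU accessors.
	 */
	static struct stats __percpu *stats;

	static int stats_init(void)
	{
		stats = alloc_percpu(struct stats);
		return stats ? 0 : -ENOMEM;
	}

	static void stats_account(unsigned long len)
	{
		/* this_cpu_* operate on this CPU's instance without an
		 * explicit get_cpu()/put_cpu() pair.
		 */
		this_cpu_inc(stats->packets);
		this_cpu_add(stats->bytes, len);
	}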
Diffstat (limited to 'arch/alpha/include/asm')
-rw-r--r--	arch/alpha/include/asm/local.h	| 17
1 file changed, 0 insertions(+), 17 deletions(-)
diff --git a/arch/alpha/include/asm/local.h b/arch/alpha/include/asm/local.h
index 6ad3ea696421..b9e3e3318371 100644
--- a/arch/alpha/include/asm/local.h
+++ b/arch/alpha/include/asm/local.h
@@ -98,21 +98,4 @@ static __inline__ long local_sub_return(long i, local_t * l)
 #define __local_add(i,l)	((l)->a.counter+=(i))
 #define __local_sub(i,l)	((l)->a.counter-=(i))
 
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations. Note they take
- * a variable, not an address.
- */
-#define cpu_local_read(l)	local_read(&__get_cpu_var(l))
-#define cpu_local_set(l, i)	local_set(&__get_cpu_var(l), (i))
-
-#define cpu_local_inc(l)	local_inc(&__get_cpu_var(l))
-#define cpu_local_dec(l)	local_dec(&__get_cpu_var(l))
-#define cpu_local_add(i, l)	local_add((i), &__get_cpu_var(l))
-#define cpu_local_sub(i, l)	local_sub((i), &__get_cpu_var(l))
-
-#define __cpu_local_inc(l)	__local_inc(&__get_cpu_var(l))
-#define __cpu_local_dec(l)	__local_dec(&__get_cpu_var(l))
-#define __cpu_local_add(i, l)	__local_add((i), &__get_cpu_var(l))
-#define __cpu_local_sub(i, l)	__local_sub((i), &__get_cpu_var(l))
-
 #endif /* _ALPHA_LOCAL_H */
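The removal above corresponds to the "local_t: Remove cpu_local_xx macros" entry in the shortlog: per-CPU counters that used to be bumped through cpu_local_inc() and friends are handled by the architecture-independent this_cpu operations instead. A hedged before/after sketch, with an invented counter name that does not appear in this tree:

	#include <linux/percpu.h>

	/* Old style: a per-CPU local_t updated with cpu_local_inc(),
	 * built on the macros deleted here.
	 */
	static DEFINE_PER_CPU(long, nr_dispatches);

	static void note_dispatch(void)
	{
		/* New style: a plain per-CPU scalar and this_cpu_inc(). */
		this_cpu_inc(nr_dispatches);
	}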