Diffstat (limited to 'include/asm-generic/local64.h')
-rw-r--r--  include/asm-generic/local64.h | 96
1 file changed, 96 insertions(+), 0 deletions(-)
diff --git a/include/asm-generic/local64.h b/include/asm-generic/local64.h
new file mode 100644
index 000000000000..02ac760c1a8b
--- /dev/null
+++ b/include/asm-generic/local64.h
@@ -0,0 +1,96 @@
+#ifndef _ASM_GENERIC_LOCAL64_H
+#define _ASM_GENERIC_LOCAL64_H
+
+#include <linux/percpu.h>
+#include <asm/types.h>
+
+/*
+ * A signed long type for operations which are atomic for a single CPU.
+ * Usually used in combination with per-cpu variables.
+ *
+ * This is the default implementation, built on atomic64_t, and is rather
+ * pointless: the whole point behind local64_t is that some processors can
+ * perform atomic adds and subtracts in a manner which is atomic wrt IRQs
+ * running on this CPU.  local64_t allows exploitation of such capabilities.
+ */
+
+/* Implement in terms of atomics. */
+
+#if BITS_PER_LONG == 64
+
+#include <asm/local.h>
+
+typedef struct {
+	local_t a;
+} local64_t;
+
+#define LOCAL64_INIT(i)	{ LOCAL_INIT(i) }
+
+#define local64_read(l)		local_read(&(l)->a)
+#define local64_set(l,i)	local_set((&(l)->a),(i))
+#define local64_inc(l)		local_inc(&(l)->a)
+#define local64_dec(l)		local_dec(&(l)->a)
+#define local64_add(i,l)	local_add((i),(&(l)->a))
+#define local64_sub(i,l)	local_sub((i),(&(l)->a))
+
+#define local64_sub_and_test(i, l) local_sub_and_test((i), (&(l)->a))
+#define local64_dec_and_test(l) local_dec_and_test(&(l)->a)
+#define local64_inc_and_test(l) local_inc_and_test(&(l)->a)
+#define local64_add_negative(i, l) local_add_negative((i), (&(l)->a))
+#define local64_add_return(i, l) local_add_return((i), (&(l)->a))
+#define local64_sub_return(i, l) local_sub_return((i), (&(l)->a))
+#define local64_inc_return(l)	local_inc_return(&(l)->a)
+
+#define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n))
+#define local64_xchg(l, n)	local_xchg((&(l)->a), (n))
+#define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u))
+#define local64_inc_not_zero(l)	local_inc_not_zero(&(l)->a)
+
+/* Non-atomic variants, i.e. preemption is disabled and the value is
+ * never touched from interrupt context.  Some archs can optimize this well. */
+#define __local64_inc(l)	local64_set((l), local64_read(l) + 1)
+#define __local64_dec(l)	local64_set((l), local64_read(l) - 1)
+#define __local64_add(i,l)	local64_set((l), local64_read(l) + (i))
+#define __local64_sub(i,l)	local64_set((l), local64_read(l) - (i))
+
+#else /* BITS_PER_LONG != 64 */
+
+#include <asm/atomic.h>
+
+/* Wrapped in a struct, not a bare typedef, so local64_t cannot be mixed with atomic64_t. */
+typedef struct {
+	atomic64_t a;
+} local64_t;
+
+#define LOCAL64_INIT(i)	{ ATOMIC64_INIT(i) }
+
+#define local64_read(l)		atomic64_read(&(l)->a)
+#define local64_set(l,i)	atomic64_set((&(l)->a),(i))
+#define local64_inc(l)		atomic64_inc(&(l)->a)
+#define local64_dec(l)		atomic64_dec(&(l)->a)
+#define local64_add(i,l)	atomic64_add((i),(&(l)->a))
+#define local64_sub(i,l)	atomic64_sub((i),(&(l)->a))
+
+#define local64_sub_and_test(i, l) atomic64_sub_and_test((i), (&(l)->a))
+#define local64_dec_and_test(l) atomic64_dec_and_test(&(l)->a)
+#define local64_inc_and_test(l) atomic64_inc_and_test(&(l)->a)
+#define local64_add_negative(i, l) atomic64_add_negative((i), (&(l)->a))
+#define local64_add_return(i, l) atomic64_add_return((i), (&(l)->a))
+#define local64_sub_return(i, l) atomic64_sub_return((i), (&(l)->a))
+#define local64_inc_return(l)	atomic64_inc_return(&(l)->a)
+
+#define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n))
+#define local64_xchg(l, n)	atomic64_xchg((&(l)->a), (n))
+#define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u))
+#define local64_inc_not_zero(l)	atomic64_inc_not_zero(&(l)->a)
+
+/* Non-atomic variants, i.e. preemption is disabled and the value is
+ * never touched from interrupt context.  Some archs can optimize this well. */
+#define __local64_inc(l)	local64_set((l), local64_read(l) + 1)
+#define __local64_dec(l)	local64_set((l), local64_read(l) - 1)
+#define __local64_add(i,l)	local64_set((l), local64_read(l) + (i))
+#define __local64_sub(i,l)	local64_set((l), local64_read(l) - (i))
+
+#endif /* BITS_PER_LONG != 64 */
+
+#endif /* _ASM_GENERIC_LOCAL64_H */
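
A minimal usage sketch for context (not part of this patch): a per-CPU
event counter built on the API above. The names evt_count, evt_record
and evt_total are hypothetical; the local64_* and per-cpu calls are the
interfaces this header and <linux/percpu.h> provide.

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <asm/local64.h>

static DEFINE_PER_CPU(local64_t, evt_count);

/* Writer: get_cpu_ptr() disables preemption so we cannot migrate off
 * this CPU, and local64_add() is atomic wrt IRQs on this CPU, so no
 * lock is needed even if the counter is also bumped from interrupts. */
static void evt_record(long n)
{
	local64_add(n, get_cpu_ptr(&evt_count));
	put_cpu_ptr(&evt_count);
}

/* Reader: sum every CPU's counter; local64_read() is an atomic read
 * of the 64-bit value even on 32-bit configurations. */
static s64 evt_total(void)
{
	s64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += local64_read(per_cpu_ptr(&evt_count, cpu));
	return sum;
}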
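
The non-atomic __local64_* variants fit a narrower case, sketched below
with the same hypothetical evt_count: when IRQs are disabled on the
owning CPU nothing else can touch the value, so a plain
read-modify-write is sufficient, and some architectures can optimize it
further than the IRQ-atomic form.

#include <linux/irqflags.h>

static void evt_bump_nonatomic(void)
{
	unsigned long flags;

	/* Disabling IRQs also rules out preemption on this CPU, so the
	 * counter cannot be accessed concurrently while we update it. */
	local_irq_save(flags);
	__local64_inc(this_cpu_ptr(&evt_count));
	local_irq_restore(flags);
}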