author     Peter Zijlstra <peterz@infradead.org>    2014-04-23 10:12:30 -0400
committer  Ingo Molnar <mingo@kernel.org>           2014-08-14 06:48:14 -0400
commit     560cb12a4080a48b84da8b96878cafbd193c4d64
tree       e0b28be89d66e8a01b164b7c6123e918cafcc79c
parent     d4608dd5b4ec13855680b89f719d8d4b2da92411
locking,arch: Rewrite generic atomic support
Rewrite the generic atomic support to require only cmpxchg(), and generate all
other primitives from that.
Furthermore, reduce the endless repetition across all these primitives to
a few CPP macros. This way we get more for fewer lines.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20140508135852.940119622@infradead.org
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: David Howells <dhowells@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-arch@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--   include/asm-generic/atomic.h    | 192
-rw-r--r--   include/asm-generic/atomic64.h  |  20
-rw-r--r--   lib/atomic64.c                  |  83
3 files changed, 148 insertions(+), 147 deletions(-)
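At its core the rewrite relies on a single technique: a compare-and-swap retry
loop. Read the counter, compute the new value, and let cmpxchg() install it
only if nobody raced in between; on a race, retry with the freshly observed
value. A minimal standalone sketch of that loop, using C11 atomics in place of
the kernel's cmpxchg() -- all names here are illustrative, not kernel API:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Sketch of the loop the new ATOMIC_OP_RETURN() macro generates. */
    static int sketch_add_return(int i, _Atomic int *v)
    {
            int c = atomic_load(v);

            /* A failed CAS reloads c with the value that beat us. */
            while (!atomic_compare_exchange_weak(v, &c, c + i))
                    ;

            return c + i;
    }

    int main(void)
    {
            _Atomic int v = 40;

            printf("%d\n", sketch_add_return(2, &v));  /* prints 42 */
            return 0;
    }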
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 9c79e7603459..56d4d36e1531 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -18,14 +18,100 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 
+/*
+ * atomic_$op() - $op integer to atomic variable
+ * @i: integer value to $op
+ * @v: pointer to the atomic variable
+ *
+ * Atomically $ops @i to @v. Does not strictly guarantee a memory-barrier, use
+ * smp_mb__{before,after}_atomic().
+ */
+
+/*
+ * atomic_$op_return() - $op integer to atomic variable and returns the result
+ * @i: integer value to $op
+ * @v: pointer to the atomic variable
+ *
+ * Atomically $ops @i to @v. Does imply a full memory barrier.
+ */
+
 #ifdef CONFIG_SMP
-/* Force people to define core atomics */
-# if !defined(atomic_add_return) || !defined(atomic_sub_return) || \
-     !defined(atomic_clear_mask) || !defined(atomic_set_mask)
-#  error "SMP requires a little arch-specific magic"
-# endif
+
+/* we can build all atomic primitives from cmpxchg */
+
+#define ATOMIC_OP(op, c_op)                                            \
+static inline void atomic_##op(int i, atomic_t *v)                     \
+{                                                                      \
+        int c, old;                                                    \
+                                                                       \
+        c = v->counter;                                                \
+        while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)         \
+                c = old;                                               \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op)                                     \
+static inline int atomic_##op##_return(int i, atomic_t *v)             \
+{                                                                      \
+        int c, old;                                                    \
+                                                                       \
+        c = v->counter;                                                \
+        while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)         \
+                c = old;                                               \
+                                                                       \
+        return c c_op i;                                               \
+}
+
+#else
+
+#include <linux/irqflags.h>
+
+#define ATOMIC_OP(op, c_op)                                            \
+static inline void atomic_##op(int i, atomic_t *v)                     \
+{                                                                      \
+        unsigned long flags;                                           \
+                                                                       \
+        raw_local_irq_save(flags);                                     \
+        v->counter = v->counter c_op i;                                \
+        raw_local_irq_restore(flags);                                  \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op)                                     \
+static inline int atomic_##op##_return(int i, atomic_t *v)             \
+{                                                                      \
+        unsigned long flags;                                           \
+        int ret;                                                       \
+                                                                       \
+        raw_local_irq_save(flags);                                     \
+        ret = (v->counter = v->counter c_op i);                        \
+        raw_local_irq_restore(flags);                                  \
+                                                                       \
+        return ret;                                                    \
+}
+
+#endif /* CONFIG_SMP */
+
+#ifndef atomic_add_return
+ATOMIC_OP_RETURN(add, +)
+#endif
+
+#ifndef atomic_sub_return
+ATOMIC_OP_RETURN(sub, -)
+#endif
+
+#ifndef atomic_clear_mask
+ATOMIC_OP(and, &)
+#define atomic_clear_mask(i, v) atomic_and(~(i), (v))
 #endif
 
+#ifndef atomic_set_mask
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+ATOMIC_OP(or, |)
+#define atomic_set_mask(i, v)   atomic_or((i), (v))
+#endif
+
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
 /*
  * Atomic operations that C can't guarantee us.  Useful for
  * resource counting etc..
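To see through the macro layer: on SMP, ATOMIC_OP_RETURN(add, +) above expands
(modulo whitespace) to the following, with the preprocessor splicing '+' in
for c_op:

    static inline int atomic_add_return(int i, atomic_t *v)
    {
            int c, old;

            c = v->counter;
            while ((old = cmpxchg(&v->counter, c, c + i)) != c)
                    c = old;

            return c + i;
    }

This has the same shape as the handwritten atomic_add_return() removed further
down, with the open-coded irq-save/restore sequence replaced by the cmpxchg()
retry loop.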
@@ -33,8 +119,6 @@
 
 #define ATOMIC_INIT(i) { (i) }
 
-#ifdef __KERNEL__
-
 /**
  * atomic_read - read atomic variable
  * @v: pointer of type atomic_t
@@ -56,52 +140,6 @@
 
 #include <linux/irqflags.h>
 
-/**
- * atomic_add_return - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns the result
- */
-#ifndef atomic_add_return
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-        unsigned long flags;
-        int temp;
-
-        raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
-        temp = v->counter;
-        temp += i;
-        v->counter = temp;
-        raw_local_irq_restore(flags);
-
-        return temp;
-}
-#endif
-
-/**
- * atomic_sub_return - subtract integer from atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns the result
- */
-#ifndef atomic_sub_return
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-        unsigned long flags;
-        int temp;
-
-        raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
-        temp = v->counter;
-        temp -= i;
-        v->counter = temp;
-        raw_local_irq_restore(flags);
-
-        return temp;
-}
-#endif
-
 static inline int atomic_add_negative(int i, atomic_t *v)
 {
         return atomic_add_return(i, v) < 0;
@@ -139,49 +177,11 @@ static inline void atomic_dec(atomic_t *v)
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
         int c, old;
         c = atomic_read(v);
         while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
                 c = old;
         return c;
-}
-
-/**
- * atomic_clear_mask - Atomically clear bits in atomic variable
- * @mask: Mask of the bits to be cleared
- * @v: pointer of type atomic_t
- *
- * Atomically clears the bits set in @mask from @v
- */
-#ifndef atomic_clear_mask
-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
-{
-        unsigned long flags;
-
-        mask = ~mask;
-        raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
-        v->counter &= mask;
-        raw_local_irq_restore(flags);
 }
-#endif
-
-/**
- * atomic_set_mask - Atomically set bits in atomic variable
- * @mask: Mask of the bits to be set
- * @v: pointer of type atomic_t
- *
- * Atomically sets the bits set in @mask in @v
- */
-#ifndef atomic_set_mask
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-        unsigned long flags;
-
-        raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
-        v->counter |= mask;
-        raw_local_irq_restore(flags);
-}
-#endif
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_GENERIC_ATOMIC_H */
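Note how the #ifndef guards invert the old contract: instead of #error-ing out
when an architecture forgets a core atomic, the header now fills in anything
the architecture did not provide. A hypothetical arch header (not from this
commit; native_fetch_add() is an invented stand-in for an arch instruction)
could therefore supply just its fast path and let the generic header generate
the rest:

    /* Hypothetical <asm/atomic.h> fragment, for illustration only. */
    static inline int atomic_add_return(int i, atomic_t *v)
    {
            return native_fetch_add(&v->counter, i) + i;  /* invented helper */
    }
    #define atomic_add_return atomic_add_return  /* suppress the generic one */

    #include <asm-generic/atomic.h>  /* generates everything not defined */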
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index b18ce4f9ee3d..30ad9c86cebb 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -20,10 +20,22 @@ typedef struct {
 
 extern long long atomic64_read(const atomic64_t *v);
 extern void atomic64_set(atomic64_t *v, long long i);
-extern void atomic64_add(long long a, atomic64_t *v);
-extern long long atomic64_add_return(long long a, atomic64_t *v);
-extern void atomic64_sub(long long a, atomic64_t *v);
-extern long long atomic64_sub_return(long long a, atomic64_t *v);
+
+#define ATOMIC64_OP(op)                                                \
+extern void atomic64_##op(long long a, atomic64_t *v);
+
+#define ATOMIC64_OP_RETURN(op)                                         \
+extern long long atomic64_##op##_return(long long a, atomic64_t *v);
+
+#define ATOMIC64_OPS(op)        ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
+
+ATOMIC64_OPS(add)
+ATOMIC64_OPS(sub)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
 extern long long atomic64_dec_if_positive(atomic64_t *v);
 extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
 extern long long atomic64_xchg(atomic64_t *v, long long new);
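Since these macros generate declarations rather than code, ATOMIC64_OPS(add)
above expands back to exactly the two externs the old header spelled out by
hand:

    extern void      atomic64_add(long long a, atomic64_t *v);
    extern long long atomic64_add_return(long long a, atomic64_t *v);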
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 08a4f068e61e..1298c05ef528 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -70,53 +70,42 @@ void atomic64_set(atomic64_t *v, long long i)
 }
 EXPORT_SYMBOL(atomic64_set);
 
-void atomic64_add(long long a, atomic64_t *v)
-{
-        unsigned long flags;
-        raw_spinlock_t *lock = lock_addr(v);
-
-        raw_spin_lock_irqsave(lock, flags);
-        v->counter += a;
-        raw_spin_unlock_irqrestore(lock, flags);
-}
-EXPORT_SYMBOL(atomic64_add);
-
-long long atomic64_add_return(long long a, atomic64_t *v)
-{
-        unsigned long flags;
-        raw_spinlock_t *lock = lock_addr(v);
-        long long val;
-
-        raw_spin_lock_irqsave(lock, flags);
-        val = v->counter += a;
-        raw_spin_unlock_irqrestore(lock, flags);
-        return val;
-}
-EXPORT_SYMBOL(atomic64_add_return);
-
-void atomic64_sub(long long a, atomic64_t *v)
-{
-        unsigned long flags;
-        raw_spinlock_t *lock = lock_addr(v);
-
-        raw_spin_lock_irqsave(lock, flags);
-        v->counter -= a;
-        raw_spin_unlock_irqrestore(lock, flags);
-}
-EXPORT_SYMBOL(atomic64_sub);
-
-long long atomic64_sub_return(long long a, atomic64_t *v)
-{
-        unsigned long flags;
-        raw_spinlock_t *lock = lock_addr(v);
-        long long val;
-
-        raw_spin_lock_irqsave(lock, flags);
-        val = v->counter -= a;
-        raw_spin_unlock_irqrestore(lock, flags);
-        return val;
-}
-EXPORT_SYMBOL(atomic64_sub_return);
+#define ATOMIC64_OP(op, c_op)                                          \
+void atomic64_##op(long long a, atomic64_t *v)                         \
+{                                                                      \
+        unsigned long flags;                                           \
+        raw_spinlock_t *lock = lock_addr(v);                           \
+                                                                       \
+        raw_spin_lock_irqsave(lock, flags);                            \
+        v->counter c_op a;                                             \
+        raw_spin_unlock_irqrestore(lock, flags);                       \
+}                                                                      \
+EXPORT_SYMBOL(atomic64_##op);
+
+#define ATOMIC64_OP_RETURN(op, c_op)                                   \
+long long atomic64_##op##_return(long long a, atomic64_t *v)           \
+{                                                                      \
+        unsigned long flags;                                           \
+        raw_spinlock_t *lock = lock_addr(v);                           \
+        long long val;                                                 \
+                                                                       \
+        raw_spin_lock_irqsave(lock, flags);                            \
+        val = (v->counter c_op a);                                     \
+        raw_spin_unlock_irqrestore(lock, flags);                       \
+        return val;                                                    \
+}                                                                      \
+EXPORT_SYMBOL(atomic64_##op##_return);
+
+#define ATOMIC64_OPS(op, c_op)                                         \
+        ATOMIC64_OP(op, c_op)                                          \
+        ATOMIC64_OP_RETURN(op, c_op)
+
+ATOMIC64_OPS(add, +=)
+ATOMIC64_OPS(sub, -=)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
 
 long long atomic64_dec_if_positive(atomic64_t *v)
 {
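As in the header, the macros reproduce the removed functions verbatim. The
generic 64-bit library cannot assume an atomic 64-bit store on 32-bit
machines, so every operation serializes through a spinlock chosen by hashing
the variable's address (that is what lock_addr(v) does); ATOMIC64_OP(add, +=)
then generates exactly the old handwritten function:

    void atomic64_add(long long a, atomic64_t *v)
    {
            unsigned long flags;
            raw_spinlock_t *lock = lock_addr(v);

            raw_spin_lock_irqsave(lock, flags);
            v->counter += a;
            raw_spin_unlock_irqrestore(lock, flags);
    }
    EXPORT_SYMBOL(atomic64_add);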