author     Jiri Kosina <jkosina@suse.cz>    2011-09-15 09:08:05 -0400
committer  Jiri Kosina <jkosina@suse.cz>    2011-09-15 09:08:18 -0400
commit     e060c38434b2caa78efe7cedaff4191040b65a15 (patch)
tree       407361230bf6733f63d8e788e4b5e6566ee04818 /include/asm-generic/atomic.h
parent     10e4ac572eeffe5317019bd7330b6058a400dfc2 (diff)
parent     cc39c6a9bbdebfcf1a7dee64d83bf302bc38d941 (diff)
Merge branch 'master' into for-next
Fast-forward merge with Linus to be able to merge patches
based on a more recent version of the tree.
Diffstat (limited to 'include/asm-generic/atomic.h')
-rw-r--r--  include/asm-generic/atomic.h  59
1 file changed, 47 insertions, 12 deletions
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index e994197f84b7..e37963c1df4d 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -1,5 +1,7 @@
 /*
- * Generic C implementation of atomic counter operations
+ * Generic C implementation of atomic counter operations. Usable on
+ * UP systems only. Do not include in machine independent code.
+ *
  * Originally implemented for MN10300.
  *
  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
@@ -14,7 +16,11 @@
 #define __ASM_GENERIC_ATOMIC_H
 
 #ifdef CONFIG_SMP
-#error not SMP safe
+/* Force people to define core atomics */
+# if !defined(atomic_add_return) || !defined(atomic_sub_return) || \
+     !defined(atomic_clear_mask) || !defined(atomic_set_mask)
+#  error "SMP requires a little arch-specific magic"
+# endif
 #endif
 
 /*
@@ -32,7 +38,9 @@
  *
  * Atomically reads the value of @v.
  */
+#ifndef atomic_read
 #define atomic_read(v)	(*(volatile int *)&(v)->counter)
+#endif
 
 /**
  * atomic_set - set atomic variable
@@ -53,6 +61,7 @@
  *
  * Atomically adds @i to @v and returns the result
  */
+#ifndef atomic_add_return
 static inline int atomic_add_return(int i, atomic_t *v)
 {
 	unsigned long flags;
@@ -66,6 +75,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
 
 	return temp;
 }
+#endif
 
 /**
  * atomic_sub_return - subtract integer from atomic variable
@@ -74,6 +84,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
  *
  * Atomically subtracts @i from @v and returns the result
  */
+#ifndef atomic_sub_return
 static inline int atomic_sub_return(int i, atomic_t *v)
 {
 	unsigned long flags;
@@ -87,6 +98,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 
 	return temp;
 }
+#endif
 
 static inline int atomic_add_negative(int i, atomic_t *v)
 {
@@ -117,8 +129,8 @@ static inline void atomic_dec(atomic_t *v)
 #define atomic_inc_return(v)		atomic_add_return(1, (v))
 
 #define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
-#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
+#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)
+#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)
 
 #define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
 #define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))
@@ -129,26 +141,51 @@ static inline void atomic_dec(atomic_t *v)
 
 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
 
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
 	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
 		c = old;
-	return c != u;
+	return c;
 }
 
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+/**
+ * atomic_clear_mask - Atomically clear bits in atomic variable
+ * @mask: Mask of the bits to be cleared
+ * @v: pointer of type atomic_t
+ *
+ * Atomically clears the bits set in @mask from @v
+ */
+#ifndef atomic_clear_mask
+static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
 {
 	unsigned long flags;
 
 	mask = ~mask;
 	raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
-	*addr &= mask;
+	v->counter &= mask;
 	raw_local_irq_restore(flags);
 }
+#endif
+
+/**
+ * atomic_set_mask - Atomically set bits in atomic variable
+ * @mask: Mask of the bits to be set
+ * @v: pointer of type atomic_t
+ *
+ * Atomically sets the bits set in @mask in @v
+ */
+#ifndef atomic_set_mask
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	unsigned long flags;
+
+	raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
+	v->counter |= mask;
+	raw_local_irq_restore(flags);
+}
+#endif
 
 /* Assume that atomic operations are already serializing */
 #define smp_mb__before_atomic_dec()	barrier()
@@ -156,7 +193,5 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 #define smp_mb__before_atomic_inc()	barrier()
 #define smp_mb__after_atomic_inc()	barrier()
 
-#include <asm-generic/atomic-long.h>
-
 #endif /* __KERNEL__ */
 #endif /* __ASM_GENERIC_ATOMIC_H */
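
The load-bearing change above is the guard structure: under CONFIG_SMP the header now refuses to build unless the architecture has already defined atomic_add_return, atomic_sub_return, atomic_clear_mask and atomic_set_mask, and each #ifndef block then drops the UP fallback for any operation the architecture supplied. A minimal sketch of an arch header that satisfies the contract; the "foo" arch and the cmpxchg()-loop bodies are illustrative stand-ins for real SMP-safe primitives, assuming the architecture's own cmpxchg() is already in scope:

/*
 * Hypothetical arch/foo/include/asm/atomic.h. Everything except the
 * atomic_* interface names is made up for illustration; the cmpxchg()
 * loops stand in for whatever the hardware really offers (ll/sc,
 * locked instructions, ...).
 */
#ifndef __ASM_FOO_ATOMIC_H
#define __ASM_FOO_ATOMIC_H

#include <linux/types.h>

static inline int foo_atomic_add_return(int i, atomic_t *v)
{
	int old, new;

	do {
		old = v->counter;
		new = old + i;
	} while (cmpxchg(&v->counter, old, new) != old);

	return new;
}

static inline void foo_atomic_clear_mask(unsigned long mask, atomic_t *v)
{
	int old;

	do {
		old = v->counter;
	} while (cmpxchg(&v->counter, old, old & ~(int)mask) != old);
}

static inline void foo_atomic_set_mask(unsigned int mask, atomic_t *v)
{
	int old;

	do {
		old = v->counter;
	} while (cmpxchg(&v->counter, old, old | (int)mask) != old);
}

/*
 * These #defines are exactly what the new #if !defined() check and the
 * per-op #ifndef guards in asm-generic/atomic.h test for.
 */
#define atomic_add_return(i, v)		foo_atomic_add_return(i, v)
#define atomic_sub_return(i, v)		foo_atomic_add_return(-(i), v)
#define atomic_clear_mask(mask, v)	foo_atomic_clear_mask(mask, v)
#define atomic_set_mask(mask, v)	foo_atomic_set_mask(mask, v)

#include <asm-generic/atomic.h>

#endif /* __ASM_FOO_ATOMIC_H */

With the four #defines visible before the #include, the generic header contributes only what the port did not supply (atomic_add_negative(), the inc/dec wrappers, the *_and_test macros, and so on).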
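
For readers without the full file: the hunks only show the edges of the UP atomic_add_return()/atomic_sub_return() fallbacks that the new #ifndef guards wrap. Their elided bodies follow the same irq-save pattern that appears in full in atomic_clear_mask(); a rough sketch, not part of the diff itself:

/* Sketch of the elided UP fallback body, modelled on the visible
 * atomic_clear_mask() pattern. */
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int temp;

	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
	temp = v->counter;
	temp += i;
	v->counter = temp;
	raw_local_irq_restore(flags);

	return temp;
}

On UP, disabling interrupts around the read-modify-write is sufficient because there is no other CPU to race with; that is exactly why the header now carries the "UP systems only" warning in its opening comment.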
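
One semantic change is easy to miss: atomic_add_unless() becomes __atomic_add_unless() and now returns the counter's old value rather than the boolean c != u, and the atomic_inc_not_zero() definition leaves this file entirely. In this kernel series the boolean interface is reconstructed one level up, in include/linux/atomic.h, along these lines:

/* Cf. include/linux/atomic.h in this series: the old boolean
 * semantics fall out of the value-returning helper. */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	return __atomic_add_unless(v, a, u) != u;
}

#ifndef atomic_inc_not_zero
#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
#endif

Returning the old value lets one primitive serve both the boolean callers, which simply compare against u, and callers that need the value itself, without a second atomic read.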