 Documentation/atomic_ops.txt   |   15 +
 arch/sparc/lib/atomic32.c      |   21 +-
 include/asm-alpha/atomic.h     |    2 +
 include/asm-arm/atomic.h       |   31 +
 include/asm-arm26/atomic.h     |   14 +
 include/asm-cris/atomic.h      |   13 +
 include/asm-frv/atomic.h       |    2 +
 include/asm-h8300/atomic.h     |   13 +
 include/asm-i386/atomic.h      |    2 +
 include/asm-ia64/atomic.h      |    2 +
 include/asm-m68k/atomic.h      |    2 +
 include/asm-m68knommu/atomic.h |    2 +
 include/asm-mips/atomic.h      |    2 +
 include/asm-parisc/atomic.h    |    1 +
 include/asm-powerpc/atomic.h   |    2 +
 include/asm-s390/atomic.h      |    2 +
 include/asm-sh/atomic.h        |   14 +
 include/asm-sh64/atomic.h      |   14 +
 include/asm-sparc/atomic.h     |    1 +
 include/asm-sparc64/atomic.h   |    2 +
 include/asm-v850/atomic.h      |   14 +
 include/asm-x86_64/atomic.h    |    2 +
 include/asm-xtensa/atomic.h    |    1 +
 23 files changed, 169 insertions(+), 5 deletions(-)
diff --git a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt
index 8eedaa24f5e2..f1744161ef06 100644
--- a/Documentation/atomic_ops.txt
+++ b/Documentation/atomic_ops.txt
@@ -115,6 +115,21 @@ boolean is return which indicates whether the resulting counter value
 is negative. It requires explicit memory barrier semantics around the
 operation.
 
+Finally:
+
+	int atomic_cmpxchg(atomic_t *v, int old, int new);
+
+This performs an atomic compare exchange operation on the atomic value v,
+with the given old and new values. Like all atomic_xxx operations,
+atomic_cmpxchg will only satisfy its atomicity semantics as long as all
+other accesses of *v are performed through atomic_xxx operations.
+
+atomic_cmpxchg requires explicit memory barriers around the operation.
+
+The semantics for atomic_cmpxchg are the same as those defined for 'cas'
+below.
+
+
 If a caller requires memory barrier semantics around an atomic_t
 operation which does not return a value, a set of interfaces are
 defined which accomplish this:
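
The documented primitive is normally consumed through a read/compute/retry
loop rather than a single call. A minimal sketch (not part of this patch;
the helper name atomic_add_unless is illustrative) built only on the
atomic_read() and atomic_cmpxchg() interfaces documented above:

	/* Sketch: add "a" to *v unless *v == u; returns non-zero if the
	 * add happened. If another CPU updates the counter between the
	 * read and the cmpxchg, the cmpxchg returns the fresh value and
	 * the loop retries with it. */
	static inline int atomic_add_unless(atomic_t *v, int a, int u)
	{
		int c, old;

		c = atomic_read(v);
		while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
			c = old;
		return c != u;
	}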
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 2e64e8c3e8e5..be46f6545184 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -37,17 +37,28 @@ int __atomic_add_return(int i, atomic_t *v)
 	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 	return ret;
 }
+EXPORT_SYMBOL(__atomic_add_return);
 
-void atomic_set(atomic_t *v, int i)
+int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
+	int ret;
 	unsigned long flags;
-	spin_lock_irqsave(ATOMIC_HASH(v), flags);
 
-	v->counter = i;
+	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+	ret = v->counter;
+	if (likely(ret == old))
+		v->counter = new;
 
 	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+	return ret;
 }
 
-EXPORT_SYMBOL(__atomic_add_return);
-EXPORT_SYMBOL(atomic_set);
+void atomic_set(atomic_t *v, int i)
+{
+	unsigned long flags;
 
+	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+	v->counter = i;
+	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+}
+EXPORT_SYMBOL(atomic_set);
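
sparc32 has no compare-and-swap instruction usable here, so the new
atomic_cmpxchg() serializes through ATOMIC_HASH(), which maps the address
of the atomic_t onto a small table of spinlocks; this is exactly why the
documentation above requires that every access to *v go through
atomic_xxx() operations. A rough sketch of the hashing idea (array size,
shift and names here are illustrative, not the actual sparc values):

	/* Sketch: unrelated counters usually hash to different locks,
	 * so they rarely contend; two counters that collide merely
	 * share a lock, which is still correct. */
	#define EX_HASH_SIZE	4
	extern spinlock_t ex_hash_locks[EX_HASH_SIZE];
	#define EX_HASH(ptr) \
		(&ex_hash_locks[((unsigned long)(ptr) >> 2) & (EX_HASH_SIZE - 1)])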
diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index 20ac3d95ecd9..a6660809a879 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -177,6 +177,8 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 	return result;
 }
 
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
 #define atomic64_dec_return(v) atomic64_sub_return(1,(v))
 
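
Alpha is the first of several architectures below (i386, ia64, m68k,
m68knommu, mips, powerpc, sparc64, x86_64, xtensa) where atomic_cmpxchg()
is a one-line wrapper: these ports already provide a general cmpxchg() on
machine words, and atomic_t just wraps an int counter. Expanded by hand,
the macro behaves roughly like this sketch (the my_ prefix marks it as
illustrative):

	static inline int my_atomic_cmpxchg(atomic_t *v, int old, int new)
	{
		/* cmpxchg() returns the value that was actually found in
		 * v->counter; it equals "old" exactly when the store of
		 * "new" took place. */
		return (int)cmpxchg(&v->counter, old, new);
	}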
diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h
index 2885972b0855..8ab1689ef56a 100644
--- a/include/asm-arm/atomic.h
+++ b/include/asm-arm/atomic.h
@@ -80,6 +80,23 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	return result;
 }
 
+static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+{
+	u32 oldval, res;
+
+	do {
+		__asm__ __volatile__("@ atomic_cmpxchg\n"
+		"ldrex	%1, [%2]\n"
+		"teq	%1, %3\n"
+		"strexeq %0, %4, [%2]\n"
+		    : "=&r" (res), "=&r" (oldval)
+		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
+		    : "cc");
+	} while (res);
+
+	return oldval;
+}
+
 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 {
 	unsigned long tmp, tmp2;
@@ -131,6 +148,20 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	return val;
 }
 
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	int ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ret = v->counter;
+	if (likely(ret == old))
+		v->counter = new;
+	local_irq_restore(flags);
+
+	return ret;
+}
+
 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 {
 	unsigned long flags;
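
The ARM header carries two variants. The ARMv6 version uses a
load-exclusive/store-exclusive pair: ldrex reads the counter and arms the
exclusive monitor, teq compares it against the expected value, strexeq
stores the new value only on a match, and the loop retries while the
store-exclusive reports failure in res. The pre-v6 uniprocessor version,
like the arm26, cris, h8300, sh, sh64 and v850 versions below, simply
masks interrupts around a plain read-compare-write. In portable pseudo-C
the LL/SC variant is roughly as follows (load_linked() and
store_conditional() are illustrative stand-ins for ldrex/strex):

	int my_llsc_cmpxchg(atomic_t *v, int old, int new)
	{
		int cur;

		do {
			cur = load_linked(&v->counter);
			if (cur != old)
				return cur;	/* mismatch: nothing stored */
		} while (!store_conditional(&v->counter, new));
		return cur;			/* == old: store succeeded */
	}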
diff --git a/include/asm-arm26/atomic.h b/include/asm-arm26/atomic.h
index 4a88235c0e76..54b24ead7132 100644
--- a/include/asm-arm26/atomic.h
+++ b/include/asm-arm26/atomic.h
@@ -62,6 +62,20 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	return val;
 }
 
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	int ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ret = v->counter;
+	if (likely(ret == old))
+		v->counter = new;
+	local_irq_restore(flags);
+
+	return ret;
+}
+
 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 {
 	unsigned long flags;
diff --git a/include/asm-cris/atomic.h b/include/asm-cris/atomic.h
index 8c2e78304523..45891f7de00f 100644
--- a/include/asm-cris/atomic.h
+++ b/include/asm-cris/atomic.h
@@ -123,6 +123,19 @@ static inline int atomic_inc_and_test(volatile atomic_t *v)
 	return retval;
 }
 
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	int ret;
+	unsigned long flags;
+
+	cris_atomic_save(v, flags);
+	ret = v->counter;
+	if (likely(ret == old))
+		v->counter = new;
+	cris_atomic_restore(v, flags);
+	return ret;
+}
+
 /* Atomic operations are already serializing */
 #define smp_mb__before_atomic_dec()	barrier()
 #define smp_mb__after_atomic_dec()	barrier()
diff --git a/include/asm-frv/atomic.h b/include/asm-frv/atomic.h
index e75968463428..55f06a0e949f 100644
--- a/include/asm-frv/atomic.h
+++ b/include/asm-frv/atomic.h
@@ -414,4 +414,6 @@ extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
414 414
415#endif 415#endif
416 416
417#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
418
417#endif /* _ASM_ATOMIC_H */ 419#endif /* _ASM_ATOMIC_H */
diff --git a/include/asm-h8300/atomic.h b/include/asm-h8300/atomic.h
index 7230f6507995..d50439259491 100644
--- a/include/asm-h8300/atomic.h
+++ b/include/asm-h8300/atomic.h
@@ -82,6 +82,19 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
 	return ret == 0;
 }
 
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	int ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ret = v->counter;
+	if (likely(ret == old))
+		v->counter = new;
+	local_irq_restore(flags);
+	return ret;
+}
+
 static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
 {
 	__asm__ __volatile__("stc ccr,r1l\n\t"
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 509720be772a..5ff698e9d2c2 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -215,6 +215,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 	return atomic_add_return(-i,v);
 }
 
+#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+
 #define atomic_inc_return(v)  (atomic_add_return(1,v))
 #define atomic_dec_return(v)  (atomic_sub_return(1,v))
 
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index 874a6f890e75..593d3da9f3c2 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -88,6 +88,8 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
 	return new;
 }
 
+#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+
 #define atomic_add_return(i,v)						\
 ({									\
 	int __ia64_aar_i = (i);						\
diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h
index 38f3043e7fe1..b821975a361a 100644
--- a/include/asm-m68k/atomic.h
+++ b/include/asm-m68k/atomic.h
@@ -139,6 +139,8 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
 	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
 }
 
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
 /* Atomic operations are already serializing */
 #define smp_mb__before_atomic_dec()	barrier()
 #define smp_mb__after_atomic_dec()	barrier()
diff --git a/include/asm-m68knommu/atomic.h b/include/asm-m68knommu/atomic.h
index a83631ed8c8f..2fd33a56b603 100644
--- a/include/asm-m68knommu/atomic.h
+++ b/include/asm-m68knommu/atomic.h
@@ -128,6 +128,8 @@ static inline int atomic_sub_return(int i, atomic_t * v)
 	return temp;
 }
 
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
 #define atomic_inc_return(v) atomic_add_return(1,(v))
 
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 6202eb8a14b7..4fba0d003c99 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -287,6 +287,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 	return result;
 }
 
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
 #define atomic_inc_return(v) atomic_add_return(1,(v))
 
diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h
index 048a2c7fd0c0..52c9a45b5f87 100644
--- a/include/asm-parisc/atomic.h
+++ b/include/asm-parisc/atomic.h
@@ -164,6 +164,7 @@ static __inline__ int atomic_read(const atomic_t *v)
 }
 
 /* exported interface */
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
 
 #define atomic_add(i,v)	((void)(__atomic_add_return( ((int)i),(v))))
 #define atomic_sub(i,v)	((void)(__atomic_add_return(-((int)i),(v))))
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index 9c0b372a46e1..37205faa9d7c 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -164,6 +164,8 @@ static __inline__ int atomic_dec_return(atomic_t *v)
 	return t;
 }
 
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
 #define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
 #define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)
 
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
index 9d86ba6f12d0..631014d5de90 100644
--- a/include/asm-s390/atomic.h
+++ b/include/asm-s390/atomic.h
@@ -198,6 +198,8 @@ atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
 	return retval;
 }
 
+#define atomic_cmpxchg(v, o, n) (atomic_compare_and_swap((o), (n), &((v)->counter)))
+
 #define smp_mb__before_atomic_dec()	smp_mb()
 #define smp_mb__after_atomic_dec()	smp_mb()
 #define smp_mb__before_atomic_inc()	smp_mb()
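
s390 maps the new interface onto its pre-existing atomic_compare_and_swap()
helper; the macro mostly permutes arguments, from (ptr, old, new) in the
generic interface to (old, new, counter) in the existing helper. Spelled
out as a function, the permutation looks roughly like this sketch (the
my_ prefix marks it as illustrative; the sketch passes the atomic_t itself
rather than the counter address, and the result is simply whatever the
existing helper returns):

	static inline int my_atomic_cmpxchg(atomic_t *v, int old, int new)
	{
		/* Argument permutation only; see the helper's definition
		 * above for its exact return convention. */
		return atomic_compare_and_swap(old, new, v);
	}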
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index 3c4f805da1ac..a148c762d366 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -87,6 +87,20 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 #define atomic_inc(v) atomic_add(1,(v))
 #define atomic_dec(v) atomic_sub(1,(v))
 
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	int ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ret = v->counter;
+	if (likely(ret == old))
+		v->counter = new;
+	local_irq_restore(flags);
+
+	return ret;
+}
+
 static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
 	unsigned long flags;
diff --git a/include/asm-sh64/atomic.h b/include/asm-sh64/atomic.h
index 8c3872d3e65f..6eeb57b015ce 100644
--- a/include/asm-sh64/atomic.h
+++ b/include/asm-sh64/atomic.h
@@ -99,6 +99,20 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 #define atomic_inc(v) atomic_add(1,(v))
 #define atomic_dec(v) atomic_sub(1,(v))
 
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	int ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ret = v->counter;
+	if (likely(ret == old))
+		v->counter = new;
+	local_irq_restore(flags);
+
+	return ret;
+}
+
 static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
 	unsigned long flags;
diff --git a/include/asm-sparc/atomic.h b/include/asm-sparc/atomic.h
index 37f6ab601c3d..52bdd1a895fa 100644
--- a/include/asm-sparc/atomic.h
+++ b/include/asm-sparc/atomic.h
@@ -19,6 +19,7 @@ typedef struct { volatile int counter; } atomic_t;
 #define ATOMIC_INIT(i)	{ (i) }
 
 extern int __atomic_add_return(int, atomic_t *);
+extern int atomic_cmpxchg(atomic_t *, int, int);
 extern void atomic_set(atomic_t *, int);
 
 #define atomic_read(v)	((v)->counter)
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index e175afcf2cde..3a0b4383bbac 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -70,6 +70,8 @@ extern int atomic64_sub_ret(int, atomic64_t *);
 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
 
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
 /* Atomic operations are already serializing */
 #ifdef CONFIG_SMP
 #define smp_mb__before_atomic_dec()	membar_storeload_loadload();
diff --git a/include/asm-v850/atomic.h b/include/asm-v850/atomic.h
index 395268a8c0de..e497166ca42b 100644
--- a/include/asm-v850/atomic.h
+++ b/include/asm-v850/atomic.h
@@ -90,6 +90,20 @@ static __inline__ void atomic_clear_mask (unsigned long mask, unsigned long *add
 #define atomic_dec_and_test(v)		(atomic_sub_return (1, (v)) == 0)
 #define atomic_add_negative(i,v)	(atomic_add_return ((i), (v)) < 0)
 
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	int ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ret = v->counter;
+	if (likely(ret == old))
+		v->counter = new;
+	local_irq_restore(flags);
+
+	return ret;
+}
+
 /* Atomic operations are already serializing on ARM */
 #define smp_mb__before_atomic_dec()	barrier()
 #define smp_mb__after_atomic_dec()	barrier()
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index fc4c5956e1ea..75c8a1e96737 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -360,6 +360,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 	return atomic_add_return(-i,v);
 }
 
+#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+
 #define atomic_inc_return(v)  (atomic_add_return(1,v))
 #define atomic_dec_return(v)  (atomic_sub_return(1,v))
 
diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h
index 12b5732dc6e5..cd40c5e75160 100644
--- a/include/asm-xtensa/atomic.h
+++ b/include/asm-xtensa/atomic.h
@@ -223,6 +223,7 @@ static inline int atomic_sub_return(int i, atomic_t * v)
  */
 #define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)
 
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
 
 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
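
The contract that all of these implementations share can be sanity-checked
outside the kernel. A minimal userspace self-test (sketch; the GCC builtin
__sync_val_compare_and_swap stands in for atomic_cmpxchg and is not part
of this patch) covering both the miss and the hit case:

	#include <assert.h>

	int main(void)
	{
		int v = 5;

		/* miss: expected value wrong; counter unchanged,
		 * current value returned */
		assert(__sync_val_compare_and_swap(&v, 4, 9) == 5);
		assert(v == 5);

		/* hit: expected value matches; new value stored,
		 * old value returned */
		assert(__sync_val_compare_and_swap(&v, 5, 9) == 5);
		assert(v == 9);

		return 0;
	}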