author     Peter Zijlstra <peterz@infradead.org>  2014-03-26 13:04:44 -0400
committer  Ingo Molnar <mingo@kernel.org>  2014-08-14 06:48:11 -0400
commit     15e3f6d782fc6ff7e004b40642ad895b91ae78bf (patch)
tree       894a8f52a270f1b2efdea955b346e75d2eb891a9 /arch/parisc
parent     e69a0ef76627005e3e83d0e086e6bb1d247bb65b (diff)
locking,arch,parisc: Fold atomic_ops
OK, no LoC saved in this case because sub was defined in terms of add.
Still do it because this also prepares for easy addition of new ops.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Helge Deller <deller@gmx.de>
Cc: James E.J. Bottomley <jejb@parisc-linux.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: linux-parisc@vger.kernel.org
Link: http://lkml.kernel.org/r/20140508135852.659342353@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
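For illustration only (a sketch, not part of the patch): the point of the fold is that each new operation becomes a one-line ATOMIC_OPS() invocation instead of a hand-written function pair. Below is a minimal, self-contained userspace model of the same pattern; a single pthread mutex stands in for the kernel's hashed _atomic_spin_lock_irqsave() spinlocks, and the hypothetical "and" op shows how a new operation would be added.

/* Userspace sketch of the ATOMIC_OP folding pattern; illustration only.
 * A pthread mutex replaces the kernel's per-hash-bucket spinlocks. */
#include <pthread.h>
#include <stdio.h>

typedef struct { int counter; } atomic_t;
static pthread_mutex_t atomic_lock = PTHREAD_MUTEX_INITIALIZER;

#define ATOMIC_OP(op, c_op)					\
static inline void atomic_##op(int i, atomic_t *v)		\
{								\
	pthread_mutex_lock(&atomic_lock);			\
	v->counter c_op i;					\
	pthread_mutex_unlock(&atomic_lock);			\
}

#define ATOMIC_OP_RETURN(op, c_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)	\
{								\
	int ret;						\
								\
	pthread_mutex_lock(&atomic_lock);			\
	ret = (v->counter c_op i);				\
	pthread_mutex_unlock(&atomic_lock);			\
	return ret;						\
}

#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
/* "Easy addition of new ops": one line per operation from here on. */
ATOMIC_OPS(and, &=)

int main(void)
{
	atomic_t v = { 6 };

	atomic_add(4, &v);				/* counter: 10 */
	printf("%d\n", atomic_sub_return(3, &v));	/* prints 7 */
	atomic_and(5, &v);				/* 7 & 5 = 5 */
	printf("%d\n", v.counter);			/* prints 5 */
	return 0;
}

Build with "cc -pthread sketch.c"; running it prints 7 and 5.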
Diffstat (limited to 'arch/parisc')
-rw-r--r--  arch/parisc/include/asm/atomic.h | 113
1 file changed, 69 insertions(+), 44 deletions(-)
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 0be2db2c7d44..219750bb4ae7 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -55,24 +55,7 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
  * are atomic, so a reader never sees inconsistent values.
  */
 
-/* It's possible to reduce all atomic operations to either
- * __atomic_add_return, atomic_set and atomic_read (the latter
- * is there only for consistency).
- */
-
-static __inline__ int __atomic_add_return(int i, atomic_t *v)
-{
-	int ret;
-	unsigned long flags;
-	_atomic_spin_lock_irqsave(v, flags);
-
-	ret = (v->counter += i);
-
-	_atomic_spin_unlock_irqrestore(v, flags);
-	return ret;
-}
-
 static __inline__ void atomic_set(atomic_t *v, int i)
 {
 	unsigned long flags;
 	_atomic_spin_lock_irqsave(v, flags);
@@ -115,16 +98,43 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
+#define ATOMIC_OP(op, c_op)					\
+static __inline__ void atomic_##op(int i, atomic_t *v)		\
+{								\
+	unsigned long flags;					\
+								\
+	_atomic_spin_lock_irqsave(v, flags);			\
+	v->counter c_op i;					\
+	_atomic_spin_unlock_irqrestore(v, flags);		\
+}								\
+
+#define ATOMIC_OP_RETURN(op, c_op)				\
+static __inline__ int atomic_##op##_return(int i, atomic_t *v)	\
+{								\
+	unsigned long flags;					\
+	int ret;						\
+								\
+	_atomic_spin_lock_irqsave(v, flags);			\
+	ret = (v->counter c_op i);				\
+	_atomic_spin_unlock_irqrestore(v, flags);		\
+								\
+	return ret;						\
+}
+
+#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
+
+ATOMIC_OPS(add, +=)
+ATOMIC_OPS(sub, -=)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
-#define atomic_add(i,v)	((void)(__atomic_add_return( (i),(v))))
-#define atomic_sub(i,v)	((void)(__atomic_add_return(-((int) (i)),(v))))
-#define atomic_inc(v)	((void)(__atomic_add_return(   1,(v))))
-#define atomic_dec(v)	((void)(__atomic_add_return(  -1,(v))))
+#define atomic_inc(v)	(atomic_add(   1,(v)))
+#define atomic_dec(v)	(atomic_add(  -1,(v)))
 
-#define atomic_add_return(i,v)	(__atomic_add_return( (i),(v)))
-#define atomic_sub_return(i,v)	(__atomic_add_return(-(i),(v)))
-#define atomic_inc_return(v)	(__atomic_add_return(   1,(v)))
-#define atomic_dec_return(v)	(__atomic_add_return(  -1,(v)))
+#define atomic_inc_return(v)	(atomic_add_return(   1,(v)))
+#define atomic_dec_return(v)	(atomic_add_return(  -1,(v)))
 
 #define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
 
@@ -148,18 +158,37 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #define ATOMIC64_INIT(i) { (i) }
 
-static __inline__ s64
-__atomic64_add_return(s64 i, atomic64_t *v)
-{
-	s64 ret;
-	unsigned long flags;
-	_atomic_spin_lock_irqsave(v, flags);
+#define ATOMIC64_OP(op, c_op)					\
+static __inline__ void atomic64_##op(s64 i, atomic64_t *v)	\
+{								\
+	unsigned long flags;					\
+								\
+	_atomic_spin_lock_irqsave(v, flags);			\
+	v->counter c_op i;					\
+	_atomic_spin_unlock_irqrestore(v, flags);		\
+}								\
+
+#define ATOMIC64_OP_RETURN(op, c_op)				\
+static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
+{								\
+	unsigned long flags;					\
+	s64 ret;						\
+								\
+	_atomic_spin_lock_irqsave(v, flags);			\
+	ret = (v->counter c_op i);				\
+	_atomic_spin_unlock_irqrestore(v, flags);		\
+								\
+	return ret;						\
+}
 
-	ret = (v->counter += i);
+#define ATOMIC64_OPS(op, c_op) ATOMIC64_OP(op, c_op) ATOMIC64_OP_RETURN(op, c_op)
 
-	_atomic_spin_unlock_irqrestore(v, flags);
-	return ret;
-}
+ATOMIC64_OPS(add, +=)
+ATOMIC64_OPS(sub, -=)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
 
 static __inline__ void
 atomic64_set(atomic64_t *v, s64 i)
@@ -178,15 +207,11 @@ atomic64_read(const atomic64_t *v)
 	return (*(volatile long *)&(v)->counter);
 }
 
-#define atomic64_add(i,v)	((void)(__atomic64_add_return( ((s64)(i)),(v))))
-#define atomic64_sub(i,v)	((void)(__atomic64_add_return(-((s64)(i)),(v))))
-#define atomic64_inc(v)		((void)(__atomic64_add_return(   1,(v))))
-#define atomic64_dec(v)		((void)(__atomic64_add_return(  -1,(v))))
+#define atomic64_inc(v)		(atomic64_add(   1,(v)))
+#define atomic64_dec(v)		(atomic64_add(  -1,(v)))
 
-#define atomic64_add_return(i,v)	(__atomic64_add_return( ((s64)(i)),(v)))
-#define atomic64_sub_return(i,v)	(__atomic64_add_return(-((s64)(i)),(v)))
-#define atomic64_inc_return(v)		(__atomic64_add_return(   1,(v)))
-#define atomic64_dec_return(v)		(__atomic64_add_return(  -1,(v)))
+#define atomic64_inc_return(v)		(atomic64_add_return(   1,(v)))
+#define atomic64_dec_return(v)		(atomic64_add_return(  -1,(v)))
 
 #define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
 
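For reference, expanding ATOMIC_OPS(add, +=) from the macros above by hand (re-indented; the preprocessor emits each function on one line) yields exactly the pair of functions that the removed __atomic_add_return() used to open-code:

/* Hand expansion of ATOMIC_OPS(add, +=); whitespace approximate. */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);
	v->counter += i;
	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int ret;

	_atomic_spin_lock_irqsave(v, flags);
	ret = (v->counter += i);
	_atomic_spin_unlock_irqrestore(v, flags);

	return ret;
}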