path: root/arch/parisc
author    Linus Torvalds <torvalds@linux-foundation.org>  2014-10-13 09:48:00 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-10-13 09:48:00 -0400
commit dbb885fecc1b1b35e93416bedd24d21bd20f60ed (patch)
tree   9aa92bcc4e3d3594eba0ba85d72b878d85f35a59 /arch/parisc
parent d6dd50e07c5bec00db2005969b1a01f8ca3d25ef (diff)
parent 2291059c852706c6f5ffb400366042b7625066cd (diff)
Merge branch 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull arch atomic cleanups from Ingo Molnar:
 "This is a series kept separate from the main locking tree, which
  cleans up and improves various details in the atomics type handling:

   - Remove the unused atomic_or_long() method

   - Consolidate and compress atomic ops implementations between
     architectures, to reduce linecount and to make it easier to add
     new ops.

   - Rewrite generic atomic support to only require cmpxchg() from an
     architecture - generate all other methods from that"

* 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
  locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read()
  locking, mips: Fix atomics
  locking, sparc64: Fix atomics
  locking,arch: Rewrite generic atomic support
  locking,arch,xtensa: Fold atomic_ops
  locking,arch,sparc: Fold atomic_ops
  locking,arch,sh: Fold atomic_ops
  locking,arch,powerpc: Fold atomic_ops
  locking,arch,parisc: Fold atomic_ops
  locking,arch,mn10300: Fold atomic_ops
  locking,arch,mips: Fold atomic_ops
  locking,arch,metag: Fold atomic_ops
  locking,arch,m68k: Fold atomic_ops
  locking,arch,m32r: Fold atomic_ops
  locking,arch,ia64: Fold atomic_ops
  locking,arch,hexagon: Fold atomic_ops
  locking,arch,cris: Fold atomic_ops
  locking,arch,avr32: Fold atomic_ops
  locking,arch,arm64: Fold atomic_ops
  locking,arch,arm: Fold atomic_ops
  ...
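The last bullet above is the structural change behind the series: once a generic fallback can build every atomic method out of cmpxchg(), an architecture only has to supply cmpxchg() itself. Below is a minimal sketch of that idea, in the spirit of the rewritten asm-generic support rather than a copy of it; atomic_add_return_sketch is a hypothetical name used only for illustration, while cmpxchg() and atomic_t come from the kernel as usual.

/*
 * Sketch, assuming the kernel's cmpxchg(ptr, old, new): it atomically
 * stores `new` at *ptr only if *ptr still equals `old`, and returns
 * the value it actually found there.  Retrying until it returns the
 * value we last read makes the whole read-modify-write atomic.
 */
static inline int atomic_add_return_sketch(int i, atomic_t *v)
{
	int c, old;

	c = v->counter;			/* unlocked speculative read */
	while ((old = cmpxchg(&v->counter, c, c + i)) != c)
		c = old;		/* lost a race; retry with the winner's value */

	return c + i;			/* the value our update produced */
}

Every other operation (sub, and, or, ...) follows by changing only the `c + i` expression, which is why one cmpxchg() primitive is enough to generate the full set.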
Diffstat (limited to 'arch/parisc')
-rw-r--r--  arch/parisc/include/asm/atomic.h | 117
1 file changed, 71 insertions(+), 46 deletions(-)
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 0be2db2c7d44..226f8ca993f6 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -55,24 +55,7 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
  * are atomic, so a reader never sees inconsistent values.
  */
 
-/* It's possible to reduce all atomic operations to either
- * __atomic_add_return, atomic_set and atomic_read (the latter
- * is there only for consistency).
- */
-
-static __inline__ int __atomic_add_return(int i, atomic_t *v)
-{
-	int ret;
-	unsigned long flags;
-	_atomic_spin_lock_irqsave(v, flags);
-
-	ret = (v->counter += i);
-
-	_atomic_spin_unlock_irqrestore(v, flags);
-	return ret;
-}
-
-static __inline__ void atomic_set(atomic_t *v, int i)
+static __inline__ void atomic_set(atomic_t *v, int i)
 {
 	unsigned long flags;
 	_atomic_spin_lock_irqsave(v, flags);
@@ -84,7 +67,7 @@ static __inline__ void atomic_set(atomic_t *v, int i)
 
 static __inline__ int atomic_read(const atomic_t *v)
 {
-	return (*(volatile int *)&(v)->counter);
+	return ACCESS_ONCE((v)->counter);
 }
 
 /* exported interface */
@@ -115,16 +98,43 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
+#define ATOMIC_OP(op, c_op)						\
+static __inline__ void atomic_##op(int i, atomic_t *v)			\
+{									\
+	unsigned long flags;						\
+									\
+	_atomic_spin_lock_irqsave(v, flags);				\
+	v->counter c_op i;						\
+	_atomic_spin_unlock_irqrestore(v, flags);			\
+}									\
+
+#define ATOMIC_OP_RETURN(op, c_op)					\
+static __inline__ int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	unsigned long flags;						\
+	int ret;							\
+									\
+	_atomic_spin_lock_irqsave(v, flags);				\
+	ret = (v->counter c_op i);					\
+	_atomic_spin_unlock_irqrestore(v, flags);			\
+									\
+	return ret;							\
+}
+
+#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
+
+ATOMIC_OPS(add, +=)
+ATOMIC_OPS(sub, -=)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
-#define atomic_add(i,v)	((void)(__atomic_add_return( (i),(v))))
-#define atomic_sub(i,v)	((void)(__atomic_add_return(-((int) (i)),(v))))
-#define atomic_inc(v)	((void)(__atomic_add_return(   1,(v))))
-#define atomic_dec(v)	((void)(__atomic_add_return(  -1,(v))))
+#define atomic_inc(v)	(atomic_add(   1,(v)))
+#define atomic_dec(v)	(atomic_add(  -1,(v)))
 
-#define atomic_add_return(i,v)	(__atomic_add_return( (i),(v)))
-#define atomic_sub_return(i,v)	(__atomic_add_return(-(i),(v)))
-#define atomic_inc_return(v)	(__atomic_add_return(   1,(v)))
-#define atomic_dec_return(v)	(__atomic_add_return(  -1,(v)))
+#define atomic_inc_return(v)	(atomic_add_return(   1,(v)))
+#define atomic_dec_return(v)	(atomic_add_return(  -1,(v)))
 
 #define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
 
@@ -148,18 +158,37 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #define ATOMIC64_INIT(i) { (i) }
 
-static __inline__ s64
-__atomic64_add_return(s64 i, atomic64_t *v)
-{
-	s64 ret;
-	unsigned long flags;
-	_atomic_spin_lock_irqsave(v, flags);
+#define ATOMIC64_OP(op, c_op)						\
+static __inline__ void atomic64_##op(s64 i, atomic64_t *v)		\
+{									\
+	unsigned long flags;						\
+									\
+	_atomic_spin_lock_irqsave(v, flags);				\
+	v->counter c_op i;						\
+	_atomic_spin_unlock_irqrestore(v, flags);			\
+}									\
+
+#define ATOMIC64_OP_RETURN(op, c_op)					\
+static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v)	\
+{									\
+	unsigned long flags;						\
+	s64 ret;							\
+									\
+	_atomic_spin_lock_irqsave(v, flags);				\
+	ret = (v->counter c_op i);					\
+	_atomic_spin_unlock_irqrestore(v, flags);			\
+									\
+	return ret;							\
+}
 
-	ret = (v->counter += i);
-
-	_atomic_spin_unlock_irqrestore(v, flags);
-	return ret;
-}
+#define ATOMIC64_OPS(op, c_op) ATOMIC64_OP(op, c_op) ATOMIC64_OP_RETURN(op, c_op)
+
+ATOMIC64_OPS(add, +=)
+ATOMIC64_OPS(sub, -=)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
 
 static __inline__ void
 atomic64_set(atomic64_t *v, s64 i)
@@ -175,18 +204,14 @@ atomic64_set(atomic64_t *v, s64 i)
 static __inline__ s64
 atomic64_read(const atomic64_t *v)
 {
-	return (*(volatile long *)&(v)->counter);
+	return ACCESS_ONCE((v)->counter);
 }
 
-#define atomic64_add(i,v)	((void)(__atomic64_add_return( ((s64)(i)),(v))))
-#define atomic64_sub(i,v)	((void)(__atomic64_add_return(-((s64)(i)),(v))))
-#define atomic64_inc(v)		((void)(__atomic64_add_return(   1,(v))))
-#define atomic64_dec(v)		((void)(__atomic64_add_return(  -1,(v))))
+#define atomic64_inc(v)		(atomic64_add(   1,(v)))
+#define atomic64_dec(v)		(atomic64_add(  -1,(v)))
 
-#define atomic64_add_return(i,v)	(__atomic64_add_return( ((s64)(i)),(v)))
-#define atomic64_sub_return(i,v)	(__atomic64_add_return(-((s64)(i)),(v)))
-#define atomic64_inc_return(v)		(__atomic64_add_return(   1,(v)))
-#define atomic64_dec_return(v)		(__atomic64_add_return(  -1,(v)))
+#define atomic64_inc_return(v)		(atomic64_add_return(   1,(v)))
+#define atomic64_dec_return(v)		(atomic64_add_return(  -1,(v)))
 
 #define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
 
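For readers new to the fold pattern used above: ATOMIC_OPS(add, +=) expands to roughly the two functions below once the preprocessor substitutes op and c_op. This is an approximate expansion derived from the macros in this diff, not extra code from the commit; it is the same spinlock-protected sequence the removed __atomic_add_return() open-coded, now generated once per operation.

/* Approximate expansion of ATOMIC_OPS(add, +=) from the hunk above. */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);	/* hashed spinlock, IRQs off */
	v->counter += i;			/* c_op was +=, op was add */
	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int ret;

	_atomic_spin_lock_irqsave(v, flags);
	ret = (v->counter += i);		/* new value, returned to caller */
	_atomic_spin_unlock_irqrestore(v, flags);

	return ret;
}

ATOMIC64_OPS(add, +=) and the sub variants expand the same way for s64, which is why all the hand-written atomic_sub*() and atomic64_*() wrappers built on __atomic_add_return() could be deleted.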