path: root/arch/powerpc
author	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 09:48:00 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 09:48:00 -0400
commit	dbb885fecc1b1b35e93416bedd24d21bd20f60ed (patch)
tree	9aa92bcc4e3d3594eba0ba85d72b878d85f35a59 /arch/powerpc
parent	d6dd50e07c5bec00db2005969b1a01f8ca3d25ef (diff)
parent	2291059c852706c6f5ffb400366042b7625066cd (diff)
Merge branch 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull arch atomic cleanups from Ingo Molnar:
 "This is a series kept separate from the main locking tree, which
  cleans up and improves various details in the atomics type handling:

   - Remove the unused atomic_or_long() method

   - Consolidate and compress atomic ops implementations between
     architectures, to reduce linecount and to make it easier to add new
     ops.

   - Rewrite generic atomic support to only require cmpxchg() from an
     architecture - generate all other methods from that"

* 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
  locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read()
  locking, mips: Fix atomics
  locking, sparc64: Fix atomics
  locking,arch: Rewrite generic atomic support
  locking,arch,xtensa: Fold atomic_ops
  locking,arch,sparc: Fold atomic_ops
  locking,arch,sh: Fold atomic_ops
  locking,arch,powerpc: Fold atomic_ops
  locking,arch,parisc: Fold atomic_ops
  locking,arch,mn10300: Fold atomic_ops
  locking,arch,mips: Fold atomic_ops
  locking,arch,metag: Fold atomic_ops
  locking,arch,m68k: Fold atomic_ops
  locking,arch,m32r: Fold atomic_ops
  locking,arch,ia64: Fold atomic_ops
  locking,arch,hexagon: Fold atomic_ops
  locking,arch,cris: Fold atomic_ops
  locking,arch,avr32: Fold atomic_ops
  locking,arch,arm64: Fold atomic_ops
  locking,arch,arm: Fold atomic_ops
  ...
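As a quick illustration of the last point above ("generate all other methods from cmpxchg()"), here is a minimal sketch of the idea. It is not the asm-generic code itself: the wrapper name sketch_add_return, the plain int counter, and the use of the GCC/Clang __sync builtin as a stand-in for an architecture's cmpxchg() are all assumptions made only for this example.

/* Hedged sketch: any read-modify-write atomic can be derived from a
 * cmpxchg()-style primitive by retrying until the compare succeeds.
 * __sync_val_compare_and_swap() stands in for cmpxchg(); it returns the
 * value that was in *counter before the operation.
 */
static inline int sketch_add_return(int i, volatile int *counter)
{
	int old, new;

	do {
		old = *counter;		/* snapshot the current value */
		new = old + i;		/* value we want to install   */
		/* retry if another CPU/thread changed *counter meanwhile */
	} while (__sync_val_compare_and_swap(counter, old, new) != old);

	return new;
}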
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/include/asm/atomic.h	198
 1 file changed, 77 insertions, 121 deletions
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 28992d012926..512d2782b043 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -26,76 +26,53 @@ static __inline__ void atomic_set(atomic_t *v, int i)
 	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
 }
 
-static __inline__ void atomic_add(int a, atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%3		# atomic_add\n\
-	add	%0,%2,%0\n"
-	PPC405_ERR77(0,%3)
-"	stwcx.	%0,0,%3 \n\
-	bne-	1b"
-	: "=&r" (t), "+m" (v->counter)
-	: "r" (a), "r" (&v->counter)
-	: "cc");
+#define ATOMIC_OP(op, asm_op)	\
+static __inline__ void atomic_##op(int a, atomic_t *v)	\
+{	\
+	int t;	\
+	\
+	__asm__ __volatile__(	\
+"1:	lwarx	%0,0,%3		# atomic_" #op "\n"	\
+	#asm_op " %0,%2,%0\n"	\
+	PPC405_ERR77(0,%3)	\
+"	stwcx.	%0,0,%3 \n"	\
+"	bne-	1b\n"	\
+	: "=&r" (t), "+m" (v->counter)	\
+	: "r" (a), "r" (&v->counter)	\
+	: "cc");	\
+}	\
+
+#define ATOMIC_OP_RETURN(op, asm_op)	\
+static __inline__ int atomic_##op##_return(int a, atomic_t *v)	\
+{	\
+	int t;	\
+	\
+	__asm__ __volatile__(	\
+	PPC_ATOMIC_ENTRY_BARRIER	\
+"1:	lwarx	%0,0,%2		# atomic_" #op "_return\n"	\
+	#asm_op " %0,%1,%0\n"	\
+	PPC405_ERR77(0,%2)	\
+"	stwcx.	%0,0,%2 \n"	\
+"	bne-	1b\n"	\
+	PPC_ATOMIC_EXIT_BARRIER	\
+	: "=&r" (t)	\
+	: "r" (a), "r" (&v->counter)	\
+	: "cc", "memory");	\
+	\
+	return t;	\
 }
 
-static __inline__ int atomic_add_return(int a, atomic_t *v)
-{
-	int t;
+#define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
 
-	__asm__ __volatile__(
-	PPC_ATOMIC_ENTRY_BARRIER
-"1:	lwarx	%0,0,%2		# atomic_add_return\n\
-	add	%0,%1,%0\n"
-	PPC405_ERR77(0,%2)
-"	stwcx.	%0,0,%2 \n\
-	bne-	1b"
-	PPC_ATOMIC_EXIT_BARRIER
-	: "=&r" (t)
-	: "r" (a), "r" (&v->counter)
-	: "cc", "memory");
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, subf)
 
-	return t;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 #define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
 
-static __inline__ void atomic_sub(int a, atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%3		# atomic_sub\n\
-	subf	%0,%2,%0\n"
-	PPC405_ERR77(0,%3)
-"	stwcx.	%0,0,%3 \n\
-	bne-	1b"
-	: "=&r" (t), "+m" (v->counter)
-	: "r" (a), "r" (&v->counter)
-	: "cc");
-}
-
-static __inline__ int atomic_sub_return(int a, atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-	PPC_ATOMIC_ENTRY_BARRIER
-"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
-	subf	%0,%1,%0\n"
-	PPC405_ERR77(0,%2)
-"	stwcx.	%0,0,%2 \n\
-	bne-	1b"
-	PPC_ATOMIC_EXIT_BARRIER
-	: "=&r" (t)
-	: "r" (a), "r" (&v->counter)
-	: "cc", "memory");
-
-	return t;
-}
-
 static __inline__ void atomic_inc(atomic_t *v)
 {
 	int t;
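To make the fold in the hunk above easier to follow, here is a small standalone C sketch of the same macro technique: one ATOMIC_OPS(op, asm_op)-style line regenerates both the void op and its _return variant, and the helper macros are then #undef'd. The demo_* names, the demo_atomic_t type, and the plain += / -= operators (standing in for the real lwarx/stwcx. loops) are assumptions made only for this illustration.

typedef struct { int counter; } demo_atomic_t;

#define DEMO_OP(op, c_op)	\
static inline void demo_##op(int a, demo_atomic_t *v)	\
{	\
	v->counter c_op a;	/* e.g. += for add, -= for sub */	\
}

#define DEMO_OP_RETURN(op, c_op)	\
static inline int demo_##op##_return(int a, demo_atomic_t *v)	\
{	\
	v->counter c_op a;	\
	return v->counter;	\
}

#define DEMO_OPS(op, c_op) DEMO_OP(op, c_op) DEMO_OP_RETURN(op, c_op)

DEMO_OPS(add, +=)	/* generates demo_add() and demo_add_return() */
DEMO_OPS(sub, -=)	/* generates demo_sub() and demo_sub_return() */

#undef DEMO_OPS
#undef DEMO_OP_RETURN
#undef DEMO_OP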
@@ -289,71 +266,50 @@ static __inline__ void atomic64_set(atomic64_t *v, long i)
 	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
 }
 
-static __inline__ void atomic64_add(long a, atomic64_t *v)
-{
-	long t;
-
-	__asm__ __volatile__(
-"1:	ldarx	%0,0,%3		# atomic64_add\n\
-	add	%0,%2,%0\n\
-	stdcx.	%0,0,%3 \n\
-	bne-	1b"
-	: "=&r" (t), "+m" (v->counter)
-	: "r" (a), "r" (&v->counter)
-	: "cc");
+#define ATOMIC64_OP(op, asm_op)	\
+static __inline__ void atomic64_##op(long a, atomic64_t *v)	\
+{	\
+	long t;	\
+	\
+	__asm__ __volatile__(	\
+"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"	\
+	#asm_op " %0,%2,%0\n"	\
+"	stdcx.	%0,0,%3 \n"	\
+"	bne-	1b\n"	\
+	: "=&r" (t), "+m" (v->counter)	\
+	: "r" (a), "r" (&v->counter)	\
+	: "cc");	\
 }
 
-static __inline__ long atomic64_add_return(long a, atomic64_t *v)
-{
-	long t;
-
-	__asm__ __volatile__(
-	PPC_ATOMIC_ENTRY_BARRIER
-"1:	ldarx	%0,0,%2		# atomic64_add_return\n\
-	add	%0,%1,%0\n\
-	stdcx.	%0,0,%2 \n\
-	bne-	1b"
-	PPC_ATOMIC_EXIT_BARRIER
-	: "=&r" (t)
-	: "r" (a), "r" (&v->counter)
-	: "cc", "memory");
-
-	return t;
+#define ATOMIC64_OP_RETURN(op, asm_op)	\
+static __inline__ long atomic64_##op##_return(long a, atomic64_t *v)	\
+{	\
+	long t;	\
+	\
+	__asm__ __volatile__(	\
+	PPC_ATOMIC_ENTRY_BARRIER	\
+"1:	ldarx	%0,0,%2		# atomic64_" #op "_return\n"	\
+	#asm_op " %0,%1,%0\n"	\
+"	stdcx.	%0,0,%2 \n"	\
+"	bne-	1b\n"	\
+	PPC_ATOMIC_EXIT_BARRIER	\
+	: "=&r" (t)	\
+	: "r" (a), "r" (&v->counter)	\
+	: "cc", "memory");	\
+	\
+	return t;	\
 }
 
-#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
-
-static __inline__ void atomic64_sub(long a, atomic64_t *v)
-{
-	long t;
-
-	__asm__ __volatile__(
-"1:	ldarx	%0,0,%3		# atomic64_sub\n\
-	subf	%0,%2,%0\n\
-	stdcx.	%0,0,%3 \n\
-	bne-	1b"
-	: "=&r" (t), "+m" (v->counter)
-	: "r" (a), "r" (&v->counter)
-	: "cc");
-}
+#define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
 
-static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
-{
-	long t;
+ATOMIC64_OPS(add, add)
+ATOMIC64_OPS(sub, subf)
 
-	__asm__ __volatile__(
-	PPC_ATOMIC_ENTRY_BARRIER
-"1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
-	subf	%0,%1,%0\n\
-	stdcx.	%0,0,%2 \n\
-	bne-	1b"
-	PPC_ATOMIC_EXIT_BARRIER
-	: "=&r" (t)
-	: "r" (a), "r" (&v->counter)
-	: "cc", "memory");
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
 
-	return t;
-}
+#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
 
 static __inline__ void atomic64_inc(atomic64_t *v)
 {