author		Peter Zijlstra <peterz@infradead.org>	2016-04-17 19:15:25 -0400
committer	Ingo Molnar <mingo@kernel.org>		2016-06-16 04:48:34 -0400
commit		4ec45856b698c37e73d973fb4b1a094dfb9d5732 (patch)
tree		836aea2f591019919588385ea43bd8580fe37e1a
parent		fe14d2f12d5e641f114e27c2ea1fb85843c58967 (diff)
locking/atomic, arch/mips: Convert to _relaxed atomics
Generic code will construct {,_acquire,_release} versions by adding the
required smp_mb__{before,after}_atomic() calls.

XXX if/when MIPS will start using their new SYNCxx instructions they can
provide custom __atomic_op_{acquire,release}() macros as per the powerpc
example.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: linux-mips@linux-mips.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
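[Reference sketch, not part of this patch.] The generic fallbacks the message refers to live in include/linux/atomic.h; the definitions below are reconstructed from memory for that era of the tree and may differ in guards and whitespace. When an architecture supplies only op##_relaxed(), the other orderings are assembled roughly like this:

/*
 * Sketch of the generic helpers (assumed, see include/linux/atomic.h):
 * the acquire/release/fully-ordered forms are built by bracketing the
 * architecture's _relaxed op with smp_mb__{before,after}_atomic().
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	smp_mb__after_atomic();		/* order against later accesses */   \
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	smp_mb__before_atomic();	/* order against earlier accesses */  \
	op##_relaxed(args);						\
})

#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb__before_atomic();					\
	__ret = op##_relaxed(args);					\
	smp_mb__after_atomic();						\
	__ret;								\
})

With this patch, a call such as atomic_add_return(i, v) on MIPS is expected to expand via __atomic_op_fence(atomic_add_return, i, v) around the new atomic_add_return_relaxed(); since MIPS maps smp_mb__before_atomic()/smp_mb__after_atomic() onto smp_mb__before_llsc()/smp_llsc_mb(), this should preserve the ordering the removed open-coded barriers provided. An architecture with lighter-weight ordering instructions (the powerpc example in the message) can instead override __atomic_op_acquire()/__atomic_op_release() with its own barriers.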
-rw-r--r--	arch/mips/include/asm/atomic.h	42
1 file changed, 22 insertions(+), 20 deletions(-)
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 387ce288334e..0ab176bdb8e8 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -79,12 +79,10 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
 }
 
 #define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
+static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
 { \
 	int result; \
 	\
-	smp_mb__before_llsc(); \
-	\
 	if (kernel_uses_llsc && R10000_LLSC_WAR) { \
 		int temp; \
 		\
@@ -125,18 +123,14 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
 		raw_local_irq_restore(flags); \
 	} \
 	\
-	smp_llsc_mb(); \
-	\
 	return result; \
 }
 
 #define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static __inline__ int atomic_fetch_##op(int i, atomic_t * v) \
+static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
 { \
 	int result; \
 	\
-	smp_mb__before_llsc(); \
-	\
 	if (kernel_uses_llsc && R10000_LLSC_WAR) { \
 		int temp; \
 		\
@@ -176,8 +170,6 @@ static __inline__ int atomic_fetch_##op(int i, atomic_t * v) \
 		raw_local_irq_restore(flags); \
 	} \
 	\
-	smp_llsc_mb(); \
-	\
 	return result; \
 }
 
@@ -189,6 +181,11 @@ static __inline__ int atomic_fetch_##op(int i, atomic_t * v) \
 ATOMIC_OPS(add, +=, addu)
 ATOMIC_OPS(sub, -=, subu)
 
+#define atomic_add_return_relaxed	atomic_add_return_relaxed
+#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
+#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
+#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
+
 #undef ATOMIC_OPS
 #define ATOMIC_OPS(op, c_op, asm_op) \
 	ATOMIC_OP(op, c_op, asm_op) \
@@ -198,6 +195,10 @@ ATOMIC_OPS(and, &=, and)
 ATOMIC_OPS(or, |=, or)
 ATOMIC_OPS(xor, ^=, xor)
 
+#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
+#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
+#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
+
 #undef ATOMIC_OPS
 #undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
@@ -420,12 +421,10 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
 }
 
 #define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
+static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
 { \
 	long result; \
 	\
-	smp_mb__before_llsc(); \
-	\
 	if (kernel_uses_llsc && R10000_LLSC_WAR) { \
 		long temp; \
 		\
@@ -467,18 +466,14 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
 		raw_local_irq_restore(flags); \
 	} \
 	\
-	smp_llsc_mb(); \
-	\
 	return result; \
 }
 
 #define ATOMIC64_FETCH_OP(op, c_op, asm_op) \
-static __inline__ long atomic64_fetch_##op(long i, atomic64_t * v) \
+static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
 { \
 	long result; \
 	\
-	smp_mb__before_llsc(); \
-	\
 	if (kernel_uses_llsc && R10000_LLSC_WAR) { \
 		long temp; \
 		\
@@ -519,8 +514,6 @@ static __inline__ long atomic64_fetch_##op(long i, atomic64_t * v) \
 		raw_local_irq_restore(flags); \
 	} \
 	\
-	smp_llsc_mb(); \
-	\
 	return result; \
 }
 
@@ -532,6 +525,11 @@ static __inline__ long atomic64_fetch_##op(long i, atomic64_t * v) \
 ATOMIC64_OPS(add, +=, daddu)
 ATOMIC64_OPS(sub, -=, dsubu)
 
+#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
+#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
+#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
+#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
+
 #undef ATOMIC64_OPS
 #define ATOMIC64_OPS(op, c_op, asm_op) \
 	ATOMIC64_OP(op, c_op, asm_op) \
@@ -541,6 +539,10 @@ ATOMIC64_OPS(and, &=, and)
 ATOMIC64_OPS(or, |=, or)
 ATOMIC64_OPS(xor, ^=, xor)
 
+#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
+#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
+#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
+
 #undef ATOMIC64_OPS
 #undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN