diff options
author: Peter Zijlstra <peterz@infradead.org> | 2016-04-17 19:16:07 -0400
committer: Ingo Molnar <mingo@kernel.org> | 2016-06-16 04:48:25 -0400
commit: f649370523033c7c2adf16a9d062438c8a7758b3 (patch)
tree: 671dd5b1d9354e91053521dd29ad90f3f05c80a9
parent: cc102507fac75f9f4f37938f49d10c25e596a608 (diff)
locking/atomic, arch/m32r: Implement atomic_fetch_{add,sub,and,or,xor}()
Implement FETCH-OP atomic primitives, these are very similar to the
existing OP-RETURN primitives we already have, except they return the
value of the atomic variable _before_ modification.
This is especially useful for irreversible operations -- such as
bitops (because it becomes impossible to reconstruct the state prior
to modification).
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- | arch/m32r/include/asm/atomic.h | 38
1 file changed, 34 insertions(+), 4 deletions(-)
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h index ea35160d632b..8ba8a0ab5d5d 100644 --- a/arch/m32r/include/asm/atomic.h +++ b/arch/m32r/include/asm/atomic.h | |||
@@ -89,16 +89,46 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \ | |||
89 | return result; \ | 89 | return result; \ |
90 | } | 90 | } |
91 | 91 | ||
92 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) | 92 | #define ATOMIC_FETCH_OP(op) \ |
93 | static __inline__ int atomic_fetch_##op(int i, atomic_t *v) \ | ||
94 | { \ | ||
95 | unsigned long flags; \ | ||
96 | int result, val; \ | ||
97 | \ | ||
98 | local_irq_save(flags); \ | ||
99 | __asm__ __volatile__ ( \ | ||
100 | "# atomic_fetch_" #op " \n\t" \ | ||
101 | DCACHE_CLEAR("%0", "r4", "%2") \ | ||
102 | M32R_LOCK" %1, @%2; \n\t" \ | ||
103 | "mv %0, %1 \n\t" \ | ||
104 | #op " %1, %3; \n\t" \ | ||
105 | M32R_UNLOCK" %1, @%2; \n\t" \ | ||
106 | : "=&r" (result), "=&r" (val) \ | ||
107 | : "r" (&v->counter), "r" (i) \ | ||
108 | : "memory" \ | ||
109 | __ATOMIC_CLOBBER \ | ||
110 | ); \ | ||
111 | local_irq_restore(flags); \ | ||
112 | \ | ||
113 | return result; \ | ||
114 | } | ||
115 | |||
116 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op) | ||
93 | 117 | ||
94 | ATOMIC_OPS(add) | 118 | ATOMIC_OPS(add) |
95 | ATOMIC_OPS(sub) | 119 | ATOMIC_OPS(sub) |
96 | 120 | ||
97 | ATOMIC_OP(and) | 121 | #undef ATOMIC_OPS |
98 | ATOMIC_OP(or) | 122 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) |
99 | ATOMIC_OP(xor) | 123 | |
124 | #define atomic_fetch_or atomic_fetch_or | ||
125 | |||
126 | ATOMIC_OPS(and) | ||
127 | ATOMIC_OPS(or) | ||
128 | ATOMIC_OPS(xor) | ||
100 | 129 | ||
101 | #undef ATOMIC_OPS | 130 | #undef ATOMIC_OPS |
131 | #undef ATOMIC_FETCH_OP | ||
102 | #undef ATOMIC_OP_RETURN | 132 | #undef ATOMIC_OP_RETURN |
103 | #undef ATOMIC_OP | 133 | #undef ATOMIC_OP |
104 | 134 | ||