author	Peter Zijlstra <peterz@infradead.org>	2014-04-23 13:50:20 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2015-07-27 08:06:23 -0400
commit	ddb7573ff68964e7b3b72eeb9cde1384c4c6ba83 (patch)
tree	c71e6b79b6b6e089ee8667340f5f1ca3210ee8d9
parent	27782f2752aca65a241f10fb2d4508c71bb2656b (diff)
mn10300: Provide atomic_{or,xor,and}
Implement atomic logic ops -- atomic_{or,xor,and}.

These will replace the atomic_{set,clear}_mask functions that are
available on some archs.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--	arch/mn10300/include/asm/atomic.h	57
1 file changed, 10 insertions(+), 47 deletions(-)
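
For callers, the conversion is mechanical: setting bits maps to
atomic_or(), and clearing bits maps to atomic_and() of the inverted
mask, exactly as the deprecated wrappers below now do. A minimal
caller-side sketch (the FLAG_PENDING bit, the state variable and both
helpers are hypothetical, invented here for illustration; only the
atomic_*() calls come from the patch):

	#include <linux/atomic.h>

	#define FLAG_PENDING	0x01	/* hypothetical flag bit */

	static atomic_t state = ATOMIC_INIT(0);

	static void mark_pending(void)		/* hypothetical helper */
	{
		/* was: atomic_set_mask(FLAG_PENDING, &state); */
		atomic_or(FLAG_PENDING, &state);
	}

	static void clear_pending(void)		/* hypothetical helper */
	{
		/* was: atomic_clear_mask(FLAG_PENDING, &state); */
		atomic_and(~FLAG_PENDING, &state);
	}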
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index 5be655e83e70..03eea8158cf9 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -89,6 +89,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -134,31 +140,9 @@ static inline void atomic_dec(atomic_t *v)
  *
  * Atomically clears the bits set in mask from the memory word specified.
  */
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
-#ifdef CONFIG_SMP
-	int status;
-
-	asm volatile(
-		"1:	mov	%3,(_AAR,%2)	\n"
-		"	mov	(_ADR,%2),%0	\n"
-		"	and	%4,%0		\n"
-		"	mov	%0,(_ADR,%2)	\n"
-		"	mov	(_ADR,%2),%0	\n"	/* flush */
-		"	mov	(_ASR,%2),%0	\n"
-		"	or	%0,%0		\n"
-		"	bne	1b		\n"
-		: "=&r"(status), "=m"(*addr)
-		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
-		: "memory", "cc");
-#else
-	unsigned long flags;
-
-	mask = ~mask;
-	flags = arch_local_cli_save();
-	*addr &= mask;
-	arch_local_irq_restore(flags);
-#endif
+	atomic_and(~mask, v);
 }
 
 /**
@@ -168,30 +152,9 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
  *
  * Atomically sets the bits set in mask from the memory word specified.
  */
-static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
-#ifdef CONFIG_SMP
-	int status;
-
-	asm volatile(
-		"1:	mov	%3,(_AAR,%2)	\n"
-		"	mov	(_ADR,%2),%0	\n"
-		"	or	%4,%0		\n"
-		"	mov	%0,(_ADR,%2)	\n"
-		"	mov	(_ADR,%2),%0	\n"	/* flush */
-		"	mov	(_ASR,%2),%0	\n"
-		"	or	%0,%0		\n"
-		"	bne	1b		\n"
-		: "=&r"(status), "=m"(*addr)
-		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
-		: "memory", "cc");
-#else
-	unsigned long flags;
-
-	flags = arch_local_cli_save();
-	*addr |= mask;
-	arch_local_irq_restore(flags);
-#endif
+	atomic_or(mask, v);
 }
 
 #endif /* __KERNEL__ */
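
For reference, ATOMIC_OP(and), ATOMIC_OP(or) and ATOMIC_OP(xor) expand
through the file's existing ATOMIC_OP() macro, whose mn10300 body is
atomic-operation-unit assembly in the same style as the removed
functions above. The generic cmpxchg()-based sketch below only
illustrates the semantics each generated helper must provide; it is an
assumption for illustration, not the mn10300 implementation:

	/* Illustrative only -- NOT the mn10300 ATOMIC_OP() macro. */
	#define ATOMIC_OP_SKETCH(op, c_op)				\
	static inline void atomic_##op(int i, atomic_t *v)		\
	{								\
		int old;						\
									\
		/* Retry until no other CPU changed v in between. */	\
		do {							\
			old = atomic_read(v);				\
		} while (atomic_cmpxchg(v, old, old c_op i) != old);	\
	}

	ATOMIC_OP_SKETCH(and, &)
	ATOMIC_OP_SKETCH(or, |)
	ATOMIC_OP_SKETCH(xor, ^)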