path: root/arch/hexagon/include
author      Peter Zijlstra <peterz@infradead.org>    2014-03-23 13:20:26 -0400
committer   Ingo Molnar <mingo@kernel.org>           2014-08-14 06:48:06 -0400
commit      50f853e38b0b90a5703ab14b70e20eb5a8ccd5de (patch)
tree        37965d41162c827fb35c5ae926d7aeb46f6392e4 /arch/hexagon/include
parent      7179e30ef66a5bae91592ae7fbacf3df6c627dd6 (diff)
locking,arch,hexagon: Fold atomic_ops
OK, no LoC saved in this case because the !return variants were defined in terms of the return ops. Still do it because this also prepares for easy addition of new ops.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Richard Kuo <rkuo@codeaurora.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: linux-hexagon@vger.kernel.org
Link: http://lkml.kernel.org/r/20140508135852.171567636@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
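The "easy addition of new ops" point is the payoff of the fold: the ATOMIC_OP()/ATOMIC_OP_RETURN() generators introduced in the diff below stamp out both the void and the value-returning variant of an operation from one body, so a further op costs a single ATOMIC_OPS() line. A minimal stand-alone sketch of the same pattern, using C11 atomics as a stand-in for the Hexagon memw_locked loop (the SKETCH_* names are illustrative only and not part of this patch):

#include <stdatomic.h>

/*
 * Sketch only -- the kernel's version (in the diff below) wraps a
 * memw_locked LL/SC retry loop in inline assembly instead of using
 * C11 atomics.  One macro body per flavour, stamped out per op.
 */
#define SKETCH_ATOMIC_OP(op, c_op)                                      \
static inline void sketch_atomic_##op(int i, atomic_int *v)            \
{                                                                       \
        (void)atomic_fetch_##op(v, i);          /* result discarded */  \
}

#define SKETCH_ATOMIC_OP_RETURN(op, c_op)                               \
static inline int sketch_atomic_##op##_return(int i, atomic_int *v)    \
{                                                                       \
        return atomic_fetch_##op(v, i) c_op i;  /* old value, then apply op */ \
}

#define SKETCH_ATOMIC_OPS(op, c_op)                                     \
        SKETCH_ATOMIC_OP(op, c_op) SKETCH_ATOMIC_OP_RETURN(op, c_op)

SKETCH_ATOMIC_OPS(add, +)  /* emits sketch_atomic_add() and sketch_atomic_add_return() */
SKETCH_ATOMIC_OPS(sub, -)  /* emits sketch_atomic_sub() and sketch_atomic_sub_return() */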
Diffstat (limited to 'arch/hexagon/include')
-rw-r--r--   arch/hexagon/include/asm/atomic.h | 68
1 file changed, 37 insertions(+), 31 deletions(-)
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index de916b11bff5..93d07025f183 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -94,41 +94,47 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return __oldval;
 }
 
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	int output;
-
-	__asm__ __volatile__ (
-		"1:	%0 = memw_locked(%1);\n"
-		"	%0 = add(%0,%2);\n"
-		"	memw_locked(%1,P3)=%0;\n"
-		"	if !P3 jump 1b;\n"
-		: "=&r" (output)
-		: "r" (&v->counter), "r" (i)
-		: "memory", "p3"
-	);
-	return output;
-
+#define ATOMIC_OP(op)						\
+static inline void atomic_##op(int i, atomic_t *v)		\
+{								\
+	int output;						\
+								\
+	__asm__ __volatile__ (					\
+		"1:	%0 = memw_locked(%1);\n"		\
+		"	%0 = "#op "(%0,%2);\n"			\
+		"	memw_locked(%1,P3)=%0;\n"		\
+		"	if !P3 jump 1b;\n"			\
+		: "=&r" (output)				\
+		: "r" (&v->counter), "r" (i)			\
+		: "memory", "p3"				\
+	);							\
+}								\
+
+#define ATOMIC_OP_RETURN(op)					\
+static inline int atomic_##op##_return(int i, atomic_t *v)	\
+{								\
+	int output;						\
+								\
+	__asm__ __volatile__ (					\
+		"1:	%0 = memw_locked(%1);\n"		\
+		"	%0 = "#op "(%0,%2);\n"			\
+		"	memw_locked(%1,P3)=%0;\n"		\
+		"	if !P3 jump 1b;\n"			\
+		: "=&r" (output)				\
+		: "r" (&v->counter), "r" (i)			\
+		: "memory", "p3"				\
+	);							\
+	return output;						\
 }
 
-#define atomic_add(i, v) atomic_add_return(i, (v))
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
 
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	int output;
-	__asm__ __volatile__ (
-		"1:	%0 = memw_locked(%1);\n"
-		"	%0 = sub(%0,%2);\n"
-		"	memw_locked(%1,P3)=%0\n"
-		"	if !P3 jump 1b;\n"
-		: "=&r" (output)
-		: "r" (&v->counter), "r" (i)
-		: "memory", "p3"
-	);
-	return output;
-}
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
 
-#define atomic_sub(i, v) atomic_sub_return(i, (v))
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 /**
  * __atomic_add_unless - add unless the number is a given value
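For reference, this is roughly what ATOMIC_OPS(add) expands to once the preprocessor splices "add" in through "#op" (whitespace adjusted; the comments are added here for explanation and are not in the generated code):

static inline void atomic_add(int i, atomic_t *v)
{
	int output;

	__asm__ __volatile__ (
		"1:	%0 = memw_locked(%1);\n"	/* load-locked: read v->counter, set reservation */
		"	%0 = add(%0,%2);\n"		/* apply the operation to the loaded value */
		"	memw_locked(%1,P3)=%0;\n"	/* store-conditional: P3 true only if reservation held */
		"	if !P3 jump 1b;\n"		/* another CPU intervened -- retry the sequence */
		: "=&r" (output)
		: "r" (&v->counter), "r" (i)
		: "memory", "p3"
	);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	int output;

	__asm__ __volatile__ (
		"1:	%0 = memw_locked(%1);\n"
		"	%0 = add(%0,%2);\n"
		"	memw_locked(%1,P3)=%0;\n"
		"	if !P3 jump 1b;\n"
		: "=&r" (output)
		: "r" (&v->counter), "r" (i)
		: "memory", "p3"
	);
	return output;				/* the updated value; the only difference from atomic_add() */
}

ATOMIC_OPS(sub) produces the same pair with sub(%0,%2) in place of add(%0,%2), which is why only one macro body has to be maintained.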