author		Peter Zijlstra <peterz@infradead.org>	2014-03-23 11:29:31 -0400
committer	Ingo Molnar <mingo@kernel.org>	2014-08-14 06:48:03 -0400
commit		f7d11e93ee97a37da1947b7c4e1794705a6f360c (patch)
tree		1506947da50b64639fa758f8562b89cca739c45d /arch/arc
parent		b93c7b8c5b281bf3646d6c5b6e05249b98cc5ab7 (diff)

locking,arch,arc: Fold atomic_ops

Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.

This also prepares for easy addition of new ops.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Link: http://lkml.kernel.org/r/20140508135851.886055622@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/arc')

-rw-r--r--	arch/arc/include/asm/atomic.h	184
1 file changed, 63 insertions, 121 deletions
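To make the fold concrete before the diff, here is a minimal, self-contained user-space sketch of the same CPP pattern (illustration only, not part of the patch): the macro names mirror the kernel's, but the atomic_t stand-in below is a plain counter with no locking, no LL/SC and no atomicity guarantees, so it builds and runs on its own.

/*
 * Toy illustration of the fold: one ATOMIC_OP / ATOMIC_OP_RETURN
 * template stamps out atomic_add(), atomic_sub(), atomic_and() and
 * the *_return() variants, much like the !CONFIG_ARC_HAS_LLSC branch
 * of the patch, minus the spinlock/irq protection.
 */
#include <stdio.h>

typedef struct { int counter; } atomic_t;   /* stand-in, not the kernel type */

#define ATOMIC_OP(op, c_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
        v->counter c_op i; /* no locking in this toy version */ \
}

#define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
        v->counter c_op i; \
        return v->counter; \
}

#define ATOMIC_OPS(op, c_op) \
        ATOMIC_OP(op, c_op) \
        ATOMIC_OP_RETURN(op, c_op)

ATOMIC_OPS(add, +=)     /* generates atomic_add() and atomic_add_return() */
ATOMIC_OPS(sub, -=)     /* generates atomic_sub() and atomic_sub_return() */
ATOMIC_OP(and, &=)      /* op-only, as the patch does for "and" */

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

int main(void)
{
        atomic_t v = { .counter = 8 };

        atomic_add(4, &v);                              /* counter becomes 12 */
        printf("%d\n", atomic_sub_return(2, &v));       /* prints 10 */
        atomic_and(~8, &v);                             /* clear-mask style: 10 & ~8 */
        printf("%d\n", v.counter);                      /* prints 2 */
        return 0;
}

The kernel version differs only in the function bodies: the LL/SC branch splices #asm_op into an llock/scond retry loop, while the spinlock branch wraps "v->counter c_op i" in atomic_ops_lock()/atomic_ops_unlock(), exactly as in the diff below.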
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 83f03ca6caf6..173f303a868f 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -25,79 +25,36 @@
 
 #define atomic_set(v, i) (((v)->counter) = (i))
 
-static inline void atomic_add(int i, atomic_t *v)
-{
-        unsigned int temp;
-
-        __asm__ __volatile__(
-        "1: llock %0, [%1] \n"
-        " add %0, %0, %2 \n"
-        " scond %0, [%1] \n"
-        " bnz 1b \n"
-        : "=&r"(temp) /* Early clobber, to prevent reg reuse */
-        : "r"(&v->counter), "ir"(i)
-        : "cc");
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-        unsigned int temp;
-
-        __asm__ __volatile__(
-        "1: llock %0, [%1] \n"
-        " sub %0, %0, %2 \n"
-        " scond %0, [%1] \n"
-        " bnz 1b \n"
-        : "=&r"(temp)
-        : "r"(&v->counter), "ir"(i)
-        : "cc");
-}
-
-/* add and also return the new value */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-        unsigned int temp;
-
-        __asm__ __volatile__(
-        "1: llock %0, [%1] \n"
-        " add %0, %0, %2 \n"
-        " scond %0, [%1] \n"
-        " bnz 1b \n"
-        : "=&r"(temp)
-        : "r"(&v->counter), "ir"(i)
-        : "cc");
-
-        return temp;
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-        unsigned int temp;
-
-        __asm__ __volatile__(
-        "1: llock %0, [%1] \n"
-        " sub %0, %0, %2 \n"
-        " scond %0, [%1] \n"
-        " bnz 1b \n"
-        : "=&r"(temp)
-        : "r"(&v->counter), "ir"(i)
-        : "cc");
-
-        return temp;
-}
-
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-        unsigned int temp;
-
-        __asm__ __volatile__(
-        "1: llock %0, [%1] \n"
-        " bic %0, %0, %2 \n"
-        " scond %0, [%1] \n"
-        " bnz 1b \n"
-        : "=&r"(temp)
-        : "r"(addr), "ir"(mask)
-        : "cc");
-}
+#define ATOMIC_OP(op, c_op, asm_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+        unsigned int temp; \
+ \
+        __asm__ __volatile__( \
+        "1: llock %0, [%1] \n" \
+        " " #asm_op " %0, %0, %2 \n" \
+        " scond %0, [%1] \n" \
+        " bnz 1b \n" \
+        : "=&r"(temp) /* Early clobber, to prevent reg reuse */ \
+        : "r"(&v->counter), "ir"(i) \
+        : "cc"); \
+} \
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+        unsigned int temp; \
+ \
+        __asm__ __volatile__( \
+        "1: llock %0, [%1] \n" \
+        " " #asm_op " %0, %0, %2 \n" \
+        " scond %0, [%1] \n" \
+        " bnz 1b \n" \
+        : "=&r"(temp) \
+        : "r"(&v->counter), "ir"(i) \
+        : "cc"); \
+ \
+        return temp; \
+}
 
 #else /* !CONFIG_ARC_HAS_LLSC */
@@ -126,6 +83,7 @@ static inline void atomic_set(atomic_t *v, int i)
         v->counter = i;
         atomic_ops_unlock(flags);
 }
+
 #endif
 
 /*
@@ -133,62 +91,46 @@ static inline void atomic_set(atomic_t *v, int i)
  * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
  */
 
-static inline void atomic_add(int i, atomic_t *v)
-{
-        unsigned long flags;
-
-        atomic_ops_lock(flags);
-        v->counter += i;
-        atomic_ops_unlock(flags);
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-        unsigned long flags;
-
-        atomic_ops_lock(flags);
-        v->counter -= i;
-        atomic_ops_unlock(flags);
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-        unsigned long flags;
-        unsigned long temp;
-
-        atomic_ops_lock(flags);
-        temp = v->counter;
-        temp += i;
-        v->counter = temp;
-        atomic_ops_unlock(flags);
-
-        return temp;
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-        unsigned long flags;
-        unsigned long temp;
-
-        atomic_ops_lock(flags);
-        temp = v->counter;
-        temp -= i;
-        v->counter = temp;
-        atomic_ops_unlock(flags);
-
-        return temp;
-}
-
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-        unsigned long flags;
-
-        atomic_ops_lock(flags);
-        *addr &= ~mask;
-        atomic_ops_unlock(flags);
-}
-
-#endif /* !CONFIG_ARC_HAS_LLSC */
+#define ATOMIC_OP(op, c_op, asm_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+        unsigned long flags; \
+ \
+        atomic_ops_lock(flags); \
+        v->counter c_op i; \
+        atomic_ops_unlock(flags); \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+        unsigned long flags; \
+        unsigned long temp; \
+ \
+        atomic_ops_lock(flags); \
+        temp = v->counter; \
+        temp c_op i; \
+        v->counter = temp; \
+        atomic_ops_unlock(flags); \
+ \
+        return temp; \
+}
+
+#endif /* !CONFIG_ARC_HAS_LLSC */
+
+#define ATOMIC_OPS(op, c_op, asm_op) \
+        ATOMIC_OP(op, c_op, asm_op) \
+        ATOMIC_OP_RETURN(op, c_op, asm_op)
+
+ATOMIC_OPS(add, +=, add)
+ATOMIC_OPS(sub, -=, sub)
+ATOMIC_OP(and, &=, and)
+
+#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 /**
  * __atomic_add_unless - add unless the number is a given value
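
For orientation (not part of the patch): in the CONFIG_ARC_HAS_LLSC branch, ATOMIC_OP(and, &=, and) should expand to roughly the function below, the stringified #asm_op being spliced into the llock/scond template; and since ARC's bic is an and-with-inverted-operand, the old bic-based atomic_clear_mask() can be recovered as atomic_and() of the complemented mask, which is what the new #define does.

/* Approximate expansion of ATOMIC_OP(and, &=, and) from the LL/SC branch */
static inline void atomic_and(int i, atomic_t *v)
{
        unsigned int temp;

        __asm__ __volatile__(
        "1: llock %0, [%1] \n"
        " and %0, %0, %2 \n"            /* #asm_op stringified and pasted */
        " scond %0, [%1] \n"
        " bnz 1b \n"
        : "=&r"(temp) /* Early clobber, to prevent reg reuse */
        : "r"(&v->counter), "ir"(i)
        : "cc");
}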