author     Vineet Gupta <vgupta@synopsys.com>    2013-01-18 04:42:16 -0500
committer  Vineet Gupta <vgupta@synopsys.com>    2013-02-11 09:30:30 -0500
commit     14e968bad788de922a755a84b92cb29f8c1342e4 (patch)
tree       5d1a38b177e2b63466ce00ee0165d04f04a2bceb /arch/arc/include/asm/atomic.h
parent     ac4c244d4e5d914f9a5642cdcc03b18780e55dbc (diff)
ARC: Atomic/bitops/cmpxchg/barriers
This covers the UP / SMP configurations with no hardware assist for atomic
r-m-w, as well as the variant based on the ARC700 LLOCK/SCOND instructions.
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
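
For context, LLOCK/SCOND is ARC's load-locked / store-conditional pair: the
store succeeds only if no other agent has written the location since the
load, so each read-modify-write is retried until it commits atomically. A
rough portable analogue of that retry loop, sketched here with C11 atomics
rather than ARC inline assembly (example_atomic_add is an illustrative name,
not part of this patch):

#include <stdatomic.h>

/* Mimic the llock/scond retry loop of the patch's atomic_add() with a
 * compare-exchange loop; the while() retry plays the role of "bnz 1b". */
static void example_atomic_add(int i, _Atomic int *v)
{
        int old = atomic_load_explicit(v, memory_order_relaxed);

        while (!atomic_compare_exchange_weak_explicit(v, &old, old + i,
                        memory_order_relaxed, memory_order_relaxed))
                ;       /* "old" was refreshed by the failed exchange */
}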
Diffstat (limited to 'arch/arc/include/asm/atomic.h')
-rw-r--r--  arch/arc/include/asm/atomic.h  232
1 file changed, 232 insertions, 0 deletions
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
new file mode 100644
index 000000000000..83f03ca6caf6
--- /dev/null
+++ b/arch/arc/include/asm/atomic.h
@@ -0,0 +1,232 @@
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifdef __KERNEL__

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define atomic_read(v)  ((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) (((v)->counter) = (i))

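/*
 * The functions below are the classic LL/SC retry idiom: LLOCK loads
 * v->counter and opens an exclusive reservation, SCOND stores the
 * updated value only if that reservation still holds, and BNZ loops
 * back to retry whenever the store-conditional failed.
 */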
static inline void atomic_add(int i, atomic_t *v)
{
        unsigned int temp;

        __asm__ __volatile__(
        "1:     llock   %0, [%1]        \n"
        "       add     %0, %0, %2      \n"
        "       scond   %0, [%1]        \n"
        "       bnz     1b              \n"
        : "=&r"(temp)   /* Early clobber, to prevent reg reuse */
        : "r"(&v->counter), "ir"(i)
        : "cc");
}

static inline void atomic_sub(int i, atomic_t *v)
{
        unsigned int temp;

        __asm__ __volatile__(
        "1:     llock   %0, [%1]        \n"
        "       sub     %0, %0, %2      \n"
        "       scond   %0, [%1]        \n"
        "       bnz     1b              \n"
        : "=&r"(temp)
        : "r"(&v->counter), "ir"(i)
        : "cc");
}

/* add i to the counter and also return the resulting value */
static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned int temp;

        __asm__ __volatile__(
        "1:     llock   %0, [%1]        \n"
        "       add     %0, %0, %2      \n"
        "       scond   %0, [%1]        \n"
        "       bnz     1b              \n"
        : "=&r"(temp)
        : "r"(&v->counter), "ir"(i)
        : "cc");

        return temp;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned int temp;

        __asm__ __volatile__(
        "1:     llock   %0, [%1]        \n"
        "       sub     %0, %0, %2      \n"
        "       scond   %0, [%1]        \n"
        "       bnz     1b              \n"
        : "=&r"(temp)
        : "r"(&v->counter), "ir"(i)
        : "cc");

        return temp;
}

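/*
 * BIC is ARC's bit-clear (AND with complement) instruction, so the
 * loop below atomically performs *addr &= ~mask.
 */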
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
        unsigned int temp;

        __asm__ __volatile__(
        "1:     llock   %0, [%1]        \n"
        "       bic     %0, %0, %2      \n"
        "       scond   %0, [%1]        \n"
        "       bnz     1b              \n"
        : "=&r"(temp)
        : "r"(addr), "ir"(mask)
        : "cc");
}

#else /* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

/* violating the atomic_xxx API locking protocol in UP for optimization's sake */
#define atomic_set(v, i) (((v)->counter) = (i))
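/*
 * This is safe on UP because the emulated r-m-w sequences further
 * below run with interrupts disabled, so a plain single-store
 * atomic_set() can never land in the middle of one of them.
 */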

#else

static inline void atomic_set(atomic_t *v, int i)
{
        /*
         * Independent of hardware support, all of the atomic_xxx() APIs need
         * to follow the same locking rules to make sure that a "hardware"
         * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
         * sequence.
         *
         * Thus atomic_set(), despite being 1 insn (and seemingly atomic),
         * requires the locking.
         */
        unsigned long flags;

        atomic_ops_lock(flags);
        v->counter = i;
        atomic_ops_unlock(flags);
}
#endif

/*
 * Non-hardware-assisted Atomic-R-M-W
 * Locking here is irq-disabling only (UP) or a spinlock (SMP)
 */
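/*
 * For reference: atomic_ops_lock()/atomic_ops_unlock() are provided by
 * <asm/smp.h> (included above); in this series they reduce to
 * local_irq_save()/local_irq_restore() on UP and additionally take a
 * global arch spinlock on SMP.
 */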

static inline void atomic_add(int i, atomic_t *v)
{
        unsigned long flags;

        atomic_ops_lock(flags);
        v->counter += i;
        atomic_ops_unlock(flags);
}

static inline void atomic_sub(int i, atomic_t *v)
{
        unsigned long flags;

        atomic_ops_lock(flags);
        v->counter -= i;
        atomic_ops_unlock(flags);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long flags;
        unsigned long temp;

        atomic_ops_lock(flags);
        temp = v->counter;
        temp += i;
        v->counter = temp;
        atomic_ops_unlock(flags);

        return temp;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long flags;
        unsigned long temp;

        atomic_ops_lock(flags);
        temp = v->counter;
        temp -= i;
        v->counter = temp;
        atomic_ops_unlock(flags);

        return temp;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
        unsigned long flags;

        atomic_ops_lock(flags);
        *addr &= ~mask;
        atomic_ops_unlock(flags);
}

#endif /* !CONFIG_ARC_HAS_LLSC */

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)                                    \
({                                                                      \
        int c, old;                                                     \
        c = atomic_read(v);                                             \
        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
                c = old;                                                \
        c;                                                              \
})
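/*
 * Example: atomic_inc_not_zero() below is built on this; it adds 1
 * unless the counter has already dropped to 0 (the classic "take a
 * reference only if the object is still alive" refcount pattern).
 */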

#define atomic_inc_not_zero(v)          atomic_add_unless((v), 1, 0)

#define atomic_inc(v)                   atomic_add(1, v)
#define atomic_dec(v)                   atomic_sub(1, v)

#define atomic_inc_and_test(v)          (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)          (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)            atomic_add_return(1, (v))
#define atomic_dec_return(v)            atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)       (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)       (atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i)                  { (i) }

#include <asm-generic/atomic64.h>

#endif /* !__ASSEMBLY__ */

#endif /* __KERNEL__ */

#endif /* _ASM_ARC_ATOMIC_H */
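
For a sense of how the API added here gets used, a minimal consumer-side
sketch (illustrative only, not part of the patch):

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t nr_users = ATOMIC_INIT(0);

/* Take a reference. */
static inline void user_get(void)
{
        atomic_inc(&nr_users);
}

/* Drop a reference; returns true on the final put, telling the
 * caller it is now responsible for tearing the object down. */
static inline bool user_put(void)
{
        return atomic_dec_and_test(&nr_users);
}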