author     James Hogan <james.hogan@imgtec.com>    2012-10-09 06:00:24 -0400
committer  James Hogan <james.hogan@imgtec.com>    2013-03-02 15:09:50 -0500
commit     6006c0d8ce9441dd1363bf14f18a8e28d3588460 (patch)
tree       786183053c89e11b3058b8a16f7953744b819340
parent     9b802d1f43978869fcd98e92b854fd8785cefee7 (diff)
metag: Atomics, locks and bitops
Add header files to implement Meta hardware thread locks (used by some
other atomic operations), atomics, spinlocks, and bitops.
There are two main types of atomic primitives for metag (in addition to
IRQs off on UP):
- LOCK instructions provide locking between hardware threads.
- LNKGET/LNKSET instructions provide load-linked/store-conditional
operations, allowing for lighter-weight atomics on Meta2.
LOCK instructions allow hardware threads to acquire voluntary or
exclusive hardware thread locks:
- LOCK0 releases exclusive and voluntary lock from the running hardware
thread.
- LOCK1 acquires the voluntary hardware lock, blocking until it becomes
available.
- LOCK2 implies LOCK1, and additionally acquires the exclusive hardware
lock, blocking all other hardware threads from executing.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
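The sketch below (not part of the patch) illustrates, in plain C, roughly how the two strategies behave. lnkget()/lnkset() are hypothetical stand-ins for the LNKGETD/LNKSETD instructions; __global_lock1()/__global_unlock1() are the helpers this patch adds in global_lock.h.

    /* Illustrative sketch only -- not part of the patch. */

    /* CONFIG_METAG_ATOMICITY_LNKGET: load-linked/store-conditional retry loop
     * (Meta2 only).  lnkget()/lnkset() are hypothetical helpers standing in
     * for the LNKGETD/LNKSETD instructions. */
    static inline void sketch_atomic_add_lnkget(int i, int *counter)
    {
            int val;

            do {
                    val = lnkget(counter);           /* load-linked */
                    val += i;
            } while (!lnkset(counter, val));         /* store-conditional; retry on failure */
    }

    /* CONFIG_METAG_ATOMICITY_LOCK1: a short critical section under the global
     * voluntary hardware-thread lock. */
    static inline void sketch_atomic_add_lock1(int i, int *counter)
    {
            unsigned long flags;

            __global_lock1(flags);           /* LOCK1 + mask triggers */
            *counter += i;
            __global_unlock1(flags);         /* LOCK0 + restore triggers */
    }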
-rw-r--r--   arch/metag/include/asm/atomic.h           |  53
-rw-r--r--   arch/metag/include/asm/atomic_lnkget.h    | 234
-rw-r--r--   arch/metag/include/asm/atomic_lock1.h     | 160
-rw-r--r--   arch/metag/include/asm/bitops.h           | 132
-rw-r--r--   arch/metag/include/asm/cmpxchg.h          |  65
-rw-r--r--   arch/metag/include/asm/cmpxchg_irq.h      |  42
-rw-r--r--   arch/metag/include/asm/cmpxchg_lnkget.h   |  86
-rw-r--r--   arch/metag/include/asm/cmpxchg_lock1.h    |  48
-rw-r--r--   arch/metag/include/asm/global_lock.h      | 100
-rw-r--r--   arch/metag/include/asm/spinlock.h         |  22
-rw-r--r--   arch/metag/include/asm/spinlock_lnkget.h  | 249
-rw-r--r--   arch/metag/include/asm/spinlock_lock1.h   | 184
-rw-r--r--   arch/metag/include/asm/spinlock_types.h   |  20
13 files changed, 1395 insertions(+), 0 deletions(-)
diff --git a/arch/metag/include/asm/atomic.h b/arch/metag/include/asm/atomic.h
new file mode 100644
index 000000000000..307ecd2bd9a1
--- /dev/null
+++ b/arch/metag/include/asm/atomic.h
@@ -0,0 +1,53 @@
1 | #ifndef __ASM_METAG_ATOMIC_H | ||
2 | #define __ASM_METAG_ATOMIC_H | ||
3 | |||
4 | #include <linux/compiler.h> | ||
5 | #include <linux/types.h> | ||
6 | #include <asm/cmpxchg.h> | ||
7 | |||
8 | #if defined(CONFIG_METAG_ATOMICITY_IRQSOFF) | ||
9 | /* The simple UP case. */ | ||
10 | #include <asm-generic/atomic.h> | ||
11 | #else | ||
12 | |||
13 | #if defined(CONFIG_METAG_ATOMICITY_LOCK1) | ||
14 | #include <asm/atomic_lock1.h> | ||
15 | #else | ||
16 | #include <asm/atomic_lnkget.h> | ||
17 | #endif | ||
18 | |||
19 | #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) | ||
20 | |||
21 | #define atomic_dec_return(v) atomic_sub_return(1, (v)) | ||
22 | #define atomic_inc_return(v) atomic_add_return(1, (v)) | ||
23 | |||
24 | /* | ||
25 | * atomic_inc_and_test - increment and test | ||
26 | * @v: pointer of type atomic_t | ||
27 | * | ||
28 | * Atomically increments @v by 1 | ||
29 | * and returns true if the result is zero, or false for all | ||
30 | * other cases. | ||
31 | */ | ||
32 | #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) | ||
33 | |||
34 | #define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0) | ||
35 | #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) | ||
36 | |||
37 | #define atomic_inc(v) atomic_add(1, (v)) | ||
38 | #define atomic_dec(v) atomic_sub(1, (v)) | ||
39 | |||
40 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | ||
41 | |||
42 | #define smp_mb__before_atomic_dec() barrier() | ||
43 | #define smp_mb__after_atomic_dec() barrier() | ||
44 | #define smp_mb__before_atomic_inc() barrier() | ||
45 | #define smp_mb__after_atomic_inc() barrier() | ||
46 | |||
47 | #endif | ||
48 | |||
49 | #define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v) | ||
50 | |||
51 | #include <asm-generic/atomic64.h> | ||
52 | |||
53 | #endif /* __ASM_METAG_ATOMIC_H */ | ||
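For context, a minimal driver-style usage sketch of the wrappers defined above (hypothetical, not part of the patch): reference counting built from atomic_inc_not_zero() and atomic_dec_and_test().

    /* Hypothetical usage sketch -- not part of the patch. */
    #include <linux/atomic.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct my_obj {
            atomic_t refcount;
            /* ... payload ... */
    };

    /* Take a reference only if the object still has users. */
    static bool my_obj_get(struct my_obj *obj)
    {
            return atomic_inc_not_zero(&obj->refcount);
    }

    /* Drop a reference and free the object on the final put. */
    static void my_obj_put(struct my_obj *obj)
    {
            if (atomic_dec_and_test(&obj->refcount))
                    kfree(obj);
    }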
diff --git a/arch/metag/include/asm/atomic_lnkget.h b/arch/metag/include/asm/atomic_lnkget.h
new file mode 100644
index 000000000000..d2e60a18986c
--- /dev/null
+++ b/arch/metag/include/asm/atomic_lnkget.h
@@ -0,0 +1,234 @@
1 | #ifndef __ASM_METAG_ATOMIC_LNKGET_H | ||
2 | #define __ASM_METAG_ATOMIC_LNKGET_H | ||
3 | |||
4 | #define ATOMIC_INIT(i) { (i) } | ||
5 | |||
6 | #define atomic_set(v, i) ((v)->counter = (i)) | ||
7 | |||
8 | #include <linux/compiler.h> | ||
9 | |||
10 | #include <asm/barrier.h> | ||
11 | |||
12 | /* | ||
13 | * None of these asm statements clobber memory as LNKSET writes around | ||
14 | * the cache so the memory it modifies cannot safely be read by any means | ||
15 | * other than these accessors. | ||
16 | */ | ||
17 | |||
18 | static inline int atomic_read(const atomic_t *v) | ||
19 | { | ||
20 | int temp; | ||
21 | |||
22 | asm volatile ( | ||
23 | "LNKGETD %0, [%1]\n" | ||
24 | : "=da" (temp) | ||
25 | : "da" (&v->counter)); | ||
26 | |||
27 | return temp; | ||
28 | } | ||
29 | |||
30 | static inline void atomic_add(int i, atomic_t *v) | ||
31 | { | ||
32 | int temp; | ||
33 | |||
34 | asm volatile ( | ||
35 | "1: LNKGETD %0, [%1]\n" | ||
36 | " ADD %0, %0, %2\n" | ||
37 | " LNKSETD [%1], %0\n" | ||
38 | " DEFR %0, TXSTAT\n" | ||
39 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
40 | " CMPT %0, #HI(0x02000000)\n" | ||
41 | " BNZ 1b\n" | ||
42 | : "=&d" (temp) | ||
43 | : "da" (&v->counter), "bd" (i) | ||
44 | : "cc"); | ||
45 | } | ||
46 | |||
47 | static inline void atomic_sub(int i, atomic_t *v) | ||
48 | { | ||
49 | int temp; | ||
50 | |||
51 | asm volatile ( | ||
52 | "1: LNKGETD %0, [%1]\n" | ||
53 | " SUB %0, %0, %2\n" | ||
54 | " LNKSETD [%1], %0\n" | ||
55 | " DEFR %0, TXSTAT\n" | ||
56 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
57 | " CMPT %0, #HI(0x02000000)\n" | ||
58 | " BNZ 1b\n" | ||
59 | : "=&d" (temp) | ||
60 | : "da" (&v->counter), "bd" (i) | ||
61 | : "cc"); | ||
62 | } | ||
63 | |||
64 | static inline int atomic_add_return(int i, atomic_t *v) | ||
65 | { | ||
66 | int result, temp; | ||
67 | |||
68 | smp_mb(); | ||
69 | |||
70 | asm volatile ( | ||
71 | "1: LNKGETD %1, [%2]\n" | ||
72 | " ADD %1, %1, %3\n" | ||
73 | " LNKSETD [%2], %1\n" | ||
74 | " DEFR %0, TXSTAT\n" | ||
75 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
76 | " CMPT %0, #HI(0x02000000)\n" | ||
77 | " BNZ 1b\n" | ||
78 | : "=&d" (temp), "=&da" (result) | ||
79 | : "da" (&v->counter), "bd" (i) | ||
80 | : "cc"); | ||
81 | |||
82 | smp_mb(); | ||
83 | |||
84 | return result; | ||
85 | } | ||
86 | |||
87 | static inline int atomic_sub_return(int i, atomic_t *v) | ||
88 | { | ||
89 | int result, temp; | ||
90 | |||
91 | smp_mb(); | ||
92 | |||
93 | asm volatile ( | ||
94 | "1: LNKGETD %1, [%2]\n" | ||
95 | " SUB %1, %1, %3\n" | ||
96 | " LNKSETD [%2], %1\n" | ||
97 | " DEFR %0, TXSTAT\n" | ||
98 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
99 | " CMPT %0, #HI(0x02000000)\n" | ||
100 | " BNZ 1b\n" | ||
101 | : "=&d" (temp), "=&da" (result) | ||
102 | : "da" (&v->counter), "bd" (i) | ||
103 | : "cc"); | ||
104 | |||
105 | smp_mb(); | ||
106 | |||
107 | return result; | ||
108 | } | ||
109 | |||
110 | static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) | ||
111 | { | ||
112 | int temp; | ||
113 | |||
114 | asm volatile ( | ||
115 | "1: LNKGETD %0, [%1]\n" | ||
116 | " AND %0, %0, %2\n" | ||
117 | " LNKSETD [%1] %0\n" | ||
118 | " DEFR %0, TXSTAT\n" | ||
119 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
120 | " CMPT %0, #HI(0x02000000)\n" | ||
121 | " BNZ 1b\n" | ||
122 | : "=&d" (temp) | ||
123 | : "da" (&v->counter), "bd" (~mask) | ||
124 | : "cc"); | ||
125 | } | ||
126 | |||
127 | static inline void atomic_set_mask(unsigned int mask, atomic_t *v) | ||
128 | { | ||
129 | int temp; | ||
130 | |||
131 | asm volatile ( | ||
132 | "1: LNKGETD %0, [%1]\n" | ||
133 | " OR %0, %0, %2\n" | ||
134 | " LNKSETD [%1], %0\n" | ||
135 | " DEFR %0, TXSTAT\n" | ||
136 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
137 | " CMPT %0, #HI(0x02000000)\n" | ||
138 | " BNZ 1b\n" | ||
139 | : "=&d" (temp) | ||
140 | : "da" (&v->counter), "bd" (mask) | ||
141 | : "cc"); | ||
142 | } | ||
143 | |||
144 | static inline int atomic_cmpxchg(atomic_t *v, int old, int new) | ||
145 | { | ||
146 | int result, temp; | ||
147 | |||
148 | smp_mb(); | ||
149 | |||
150 | asm volatile ( | ||
151 | "1: LNKGETD %1, [%2]\n" | ||
152 | " CMP %1, %3\n" | ||
153 | " LNKSETDEQ [%2], %4\n" | ||
154 | " BNE 2f\n" | ||
155 | " DEFR %0, TXSTAT\n" | ||
156 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
157 | " CMPT %0, #HI(0x02000000)\n" | ||
158 | " BNZ 1b\n" | ||
159 | "2:\n" | ||
160 | : "=&d" (temp), "=&d" (result) | ||
161 | : "da" (&v->counter), "bd" (old), "da" (new) | ||
162 | : "cc"); | ||
163 | |||
164 | smp_mb(); | ||
165 | |||
166 | return result; | ||
167 | } | ||
168 | |||
169 | static inline int atomic_xchg(atomic_t *v, int new) | ||
170 | { | ||
171 | int temp, old; | ||
172 | |||
173 | asm volatile ( | ||
174 | "1: LNKGETD %1, [%2]\n" | ||
175 | " LNKSETD [%2], %3\n" | ||
176 | " DEFR %0, TXSTAT\n" | ||
177 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
178 | " CMPT %0, #HI(0x02000000)\n" | ||
179 | " BNZ 1b\n" | ||
180 | : "=&d" (temp), "=&d" (old) | ||
181 | : "da" (&v->counter), "da" (new) | ||
182 | : "cc"); | ||
183 | |||
184 | return old; | ||
185 | } | ||
186 | |||
187 | static inline int __atomic_add_unless(atomic_t *v, int a, int u) | ||
188 | { | ||
189 | int result, temp; | ||
190 | |||
191 | smp_mb(); | ||
192 | |||
193 | asm volatile ( | ||
194 | "1: LNKGETD %1, [%2]\n" | ||
195 | " CMP %1, %3\n" | ||
196 | " ADD %0, %1, %4\n" | ||
197 | " LNKSETDNE [%2], %0\n" | ||
198 | " BEQ 2f\n" | ||
199 | " DEFR %0, TXSTAT\n" | ||
200 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
201 | " CMPT %0, #HI(0x02000000)\n" | ||
202 | " BNZ 1b\n" | ||
203 | "2:\n" | ||
204 | : "=&d" (temp), "=&d" (result) | ||
205 | : "da" (&v->counter), "bd" (u), "bd" (a) | ||
206 | : "cc"); | ||
207 | |||
208 | smp_mb(); | ||
209 | |||
210 | return result; | ||
211 | } | ||
212 | |||
213 | static inline int atomic_sub_if_positive(int i, atomic_t *v) | ||
214 | { | ||
215 | int result, temp; | ||
216 | |||
217 | asm volatile ( | ||
218 | "1: LNKGETD %1, [%2]\n" | ||
219 | " SUBS %1, %1, %3\n" | ||
220 | " LNKSETDGE [%2], %1\n" | ||
221 | " BLT 2f\n" | ||
222 | " DEFR %0, TXSTAT\n" | ||
223 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
224 | " CMPT %0, #HI(0x02000000)\n" | ||
225 | " BNZ 1b\n" | ||
226 | "2:\n" | ||
227 | : "=&d" (temp), "=&da" (result) | ||
228 | : "da" (&v->counter), "bd" (i) | ||
229 | : "cc"); | ||
230 | |||
231 | return result; | ||
232 | } | ||
233 | |||
234 | #endif /* __ASM_METAG_ATOMIC_LNKGET_H */ | ||
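As a portable analogue (not part of the patch), the atomic_cmpxchg() above has the semantics of a sequentially consistent compare-and-exchange; expressed with the GCC/Clang __atomic builtins it looks roughly like this.

    /* Portable analogue of atomic_cmpxchg() above -- not part of the patch. */
    static inline int analogue_atomic_cmpxchg(int *counter, int old, int new_val)
    {
            /* On failure the builtin writes the value actually observed into
             * 'old', so returning it matches cmpxchg semantics: the caller
             * succeeded iff the return value equals the expected 'old'. */
            __atomic_compare_exchange_n(counter, &old, new_val, 0,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
            return old;
    }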
diff --git a/arch/metag/include/asm/atomic_lock1.h b/arch/metag/include/asm/atomic_lock1.h
new file mode 100644
index 000000000000..e578955e674b
--- /dev/null
+++ b/arch/metag/include/asm/atomic_lock1.h
@@ -0,0 +1,160 @@
1 | #ifndef __ASM_METAG_ATOMIC_LOCK1_H | ||
2 | #define __ASM_METAG_ATOMIC_LOCK1_H | ||
3 | |||
4 | #define ATOMIC_INIT(i) { (i) } | ||
5 | |||
6 | #include <linux/compiler.h> | ||
7 | |||
8 | #include <asm/barrier.h> | ||
9 | #include <asm/global_lock.h> | ||
10 | |||
11 | static inline int atomic_read(const atomic_t *v) | ||
12 | { | ||
13 | return (v)->counter; | ||
14 | } | ||
15 | |||
16 | /* | ||
17 | * atomic_set needs to take the lock to protect atomic_add_unless from a | ||
18 | * possible race, as it reads the counter twice: | ||
19 | * | ||
20 | *   CPU0                            CPU1 | ||
21 | *   atomic_add_unless(1, 0) | ||
22 | *     ret = v->counter (non-zero) | ||
23 | *     if (ret != u)                 v->counter = 0 | ||
24 | *       v->counter += 1 (counter set to 1) | ||
25 | * | ||
26 | * Making atomic_set take the lock ensures that ordering and logical | ||
27 | * consistency are preserved. | ||
28 | */ | ||
29 | static inline int atomic_set(atomic_t *v, int i) | ||
30 | { | ||
31 | unsigned long flags; | ||
32 | |||
33 | __global_lock1(flags); | ||
34 | fence(); | ||
35 | v->counter = i; | ||
36 | __global_unlock1(flags); | ||
37 | return i; | ||
38 | } | ||
39 | |||
40 | static inline void atomic_add(int i, atomic_t *v) | ||
41 | { | ||
42 | unsigned long flags; | ||
43 | |||
44 | __global_lock1(flags); | ||
45 | fence(); | ||
46 | v->counter += i; | ||
47 | __global_unlock1(flags); | ||
48 | } | ||
49 | |||
50 | static inline void atomic_sub(int i, atomic_t *v) | ||
51 | { | ||
52 | unsigned long flags; | ||
53 | |||
54 | __global_lock1(flags); | ||
55 | fence(); | ||
56 | v->counter -= i; | ||
57 | __global_unlock1(flags); | ||
58 | } | ||
59 | |||
60 | static inline int atomic_add_return(int i, atomic_t *v) | ||
61 | { | ||
62 | unsigned long result; | ||
63 | unsigned long flags; | ||
64 | |||
65 | __global_lock1(flags); | ||
66 | result = v->counter; | ||
67 | result += i; | ||
68 | fence(); | ||
69 | v->counter = result; | ||
70 | __global_unlock1(flags); | ||
71 | |||
72 | return result; | ||
73 | } | ||
74 | |||
75 | static inline int atomic_sub_return(int i, atomic_t *v) | ||
76 | { | ||
77 | unsigned long result; | ||
78 | unsigned long flags; | ||
79 | |||
80 | __global_lock1(flags); | ||
81 | result = v->counter; | ||
82 | result -= i; | ||
83 | fence(); | ||
84 | v->counter = result; | ||
85 | __global_unlock1(flags); | ||
86 | |||
87 | return result; | ||
88 | } | ||
89 | |||
90 | static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) | ||
91 | { | ||
92 | unsigned long flags; | ||
93 | |||
94 | __global_lock1(flags); | ||
95 | fence(); | ||
96 | v->counter &= ~mask; | ||
97 | __global_unlock1(flags); | ||
98 | } | ||
99 | |||
100 | static inline void atomic_set_mask(unsigned int mask, atomic_t *v) | ||
101 | { | ||
102 | unsigned long flags; | ||
103 | |||
104 | __global_lock1(flags); | ||
105 | fence(); | ||
106 | v->counter |= mask; | ||
107 | __global_unlock1(flags); | ||
108 | } | ||
109 | |||
110 | static inline int atomic_cmpxchg(atomic_t *v, int old, int new) | ||
111 | { | ||
112 | int ret; | ||
113 | unsigned long flags; | ||
114 | |||
115 | __global_lock1(flags); | ||
116 | ret = v->counter; | ||
117 | if (ret == old) { | ||
118 | fence(); | ||
119 | v->counter = new; | ||
120 | } | ||
121 | __global_unlock1(flags); | ||
122 | |||
123 | return ret; | ||
124 | } | ||
125 | |||
126 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) | ||
127 | |||
128 | static inline int __atomic_add_unless(atomic_t *v, int a, int u) | ||
129 | { | ||
130 | int ret; | ||
131 | unsigned long flags; | ||
132 | |||
133 | __global_lock1(flags); | ||
134 | ret = v->counter; | ||
135 | if (ret != u) { | ||
136 | fence(); | ||
137 | v->counter += a; | ||
138 | } | ||
139 | __global_unlock1(flags); | ||
140 | |||
141 | return ret; | ||
142 | } | ||
143 | |||
144 | static inline int atomic_sub_if_positive(int i, atomic_t *v) | ||
145 | { | ||
146 | int ret; | ||
147 | unsigned long flags; | ||
148 | |||
149 | __global_lock1(flags); | ||
150 | ret = v->counter - i; | ||
151 | if (ret >= 0) { | ||
152 | fence(); | ||
153 | v->counter = ret; | ||
154 | } | ||
155 | __global_unlock1(flags); | ||
156 | |||
157 | return ret; | ||
158 | } | ||
159 | |||
160 | #endif /* __ASM_METAG_ATOMIC_LOCK1_H */ | ||
diff --git a/arch/metag/include/asm/bitops.h b/arch/metag/include/asm/bitops.h
new file mode 100644
index 000000000000..c0d0df0d1378
--- /dev/null
+++ b/arch/metag/include/asm/bitops.h
@@ -0,0 +1,132 @@
1 | #ifndef __ASM_METAG_BITOPS_H | ||
2 | #define __ASM_METAG_BITOPS_H | ||
3 | |||
4 | #include <linux/compiler.h> | ||
5 | #include <asm/barrier.h> | ||
6 | #include <asm/global_lock.h> | ||
7 | |||
8 | /* | ||
9 | * clear_bit() doesn't provide any barrier for the compiler. | ||
10 | */ | ||
11 | #define smp_mb__before_clear_bit() barrier() | ||
12 | #define smp_mb__after_clear_bit() barrier() | ||
13 | |||
14 | #ifdef CONFIG_SMP | ||
15 | /* | ||
16 | * These functions are the basis of our bit ops. | ||
17 | */ | ||
18 | static inline void set_bit(unsigned int bit, volatile unsigned long *p) | ||
19 | { | ||
20 | unsigned long flags; | ||
21 | unsigned long mask = 1UL << (bit & 31); | ||
22 | |||
23 | p += bit >> 5; | ||
24 | |||
25 | __global_lock1(flags); | ||
26 | fence(); | ||
27 | *p |= mask; | ||
28 | __global_unlock1(flags); | ||
29 | } | ||
30 | |||
31 | static inline void clear_bit(unsigned int bit, volatile unsigned long *p) | ||
32 | { | ||
33 | unsigned long flags; | ||
34 | unsigned long mask = 1UL << (bit & 31); | ||
35 | |||
36 | p += bit >> 5; | ||
37 | |||
38 | __global_lock1(flags); | ||
39 | fence(); | ||
40 | *p &= ~mask; | ||
41 | __global_unlock1(flags); | ||
42 | } | ||
43 | |||
44 | static inline void change_bit(unsigned int bit, volatile unsigned long *p) | ||
45 | { | ||
46 | unsigned long flags; | ||
47 | unsigned long mask = 1UL << (bit & 31); | ||
48 | |||
49 | p += bit >> 5; | ||
50 | |||
51 | __global_lock1(flags); | ||
52 | fence(); | ||
53 | *p ^= mask; | ||
54 | __global_unlock1(flags); | ||
55 | } | ||
56 | |||
57 | static inline int test_and_set_bit(unsigned int bit, volatile unsigned long *p) | ||
58 | { | ||
59 | unsigned long flags; | ||
60 | unsigned long old; | ||
61 | unsigned long mask = 1UL << (bit & 31); | ||
62 | |||
63 | p += bit >> 5; | ||
64 | |||
65 | __global_lock1(flags); | ||
66 | old = *p; | ||
67 | if (!(old & mask)) { | ||
68 | fence(); | ||
69 | *p = old | mask; | ||
70 | } | ||
71 | __global_unlock1(flags); | ||
72 | |||
73 | return (old & mask) != 0; | ||
74 | } | ||
75 | |||
76 | static inline int test_and_clear_bit(unsigned int bit, | ||
77 | volatile unsigned long *p) | ||
78 | { | ||
79 | unsigned long flags; | ||
80 | unsigned long old; | ||
81 | unsigned long mask = 1UL << (bit & 31); | ||
82 | |||
83 | p += bit >> 5; | ||
84 | |||
85 | __global_lock1(flags); | ||
86 | old = *p; | ||
87 | if (old & mask) { | ||
88 | fence(); | ||
89 | *p = old & ~mask; | ||
90 | } | ||
91 | __global_unlock1(flags); | ||
92 | |||
93 | return (old & mask) != 0; | ||
94 | } | ||
95 | |||
96 | static inline int test_and_change_bit(unsigned int bit, | ||
97 | volatile unsigned long *p) | ||
98 | { | ||
99 | unsigned long flags; | ||
100 | unsigned long old; | ||
101 | unsigned long mask = 1UL << (bit & 31); | ||
102 | |||
103 | p += bit >> 5; | ||
104 | |||
105 | __global_lock1(flags); | ||
106 | fence(); | ||
107 | old = *p; | ||
108 | *p = old ^ mask; | ||
109 | __global_unlock1(flags); | ||
110 | |||
111 | return (old & mask) != 0; | ||
112 | } | ||
113 | |||
114 | #else | ||
115 | #include <asm-generic/bitops/atomic.h> | ||
116 | #endif /* CONFIG_SMP */ | ||
117 | |||
118 | #include <asm-generic/bitops/non-atomic.h> | ||
119 | #include <asm-generic/bitops/find.h> | ||
120 | #include <asm-generic/bitops/ffs.h> | ||
121 | #include <asm-generic/bitops/__ffs.h> | ||
122 | #include <asm-generic/bitops/ffz.h> | ||
123 | #include <asm-generic/bitops/fls.h> | ||
124 | #include <asm-generic/bitops/__fls.h> | ||
125 | #include <asm-generic/bitops/fls64.h> | ||
126 | #include <asm-generic/bitops/hweight.h> | ||
127 | #include <asm-generic/bitops/lock.h> | ||
128 | #include <asm-generic/bitops/sched.h> | ||
129 | #include <asm-generic/bitops/le.h> | ||
130 | #include <asm-generic/bitops/ext2-atomic.h> | ||
131 | |||
132 | #endif /* __ASM_METAG_BITOPS_H */ | ||
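A hypothetical usage sketch of the bit ops above (not part of the patch): claiming and releasing a "busy" flag bit, using the smp_mb__before_clear_bit() barrier defined at the top of the file.

    /* Hypothetical usage sketch -- not part of the patch. */
    #include <linux/bitops.h>
    #include <linux/errno.h>

    #define MY_BUSY_BIT     0

    static unsigned long my_flags;

    static int my_try_start(void)
    {
            /* Atomically set the bit; a non-zero return means it was already set. */
            if (test_and_set_bit(MY_BUSY_BIT, &my_flags))
                    return -EBUSY;
            return 0;
    }

    static void my_finish(void)
    {
            smp_mb__before_clear_bit();     /* order earlier stores before the release */
            clear_bit(MY_BUSY_BIT, &my_flags);
    }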
diff --git a/arch/metag/include/asm/cmpxchg.h b/arch/metag/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..b1bc1be8540f
--- /dev/null
+++ b/arch/metag/include/asm/cmpxchg.h
@@ -0,0 +1,65 @@
1 | #ifndef __ASM_METAG_CMPXCHG_H | ||
2 | #define __ASM_METAG_CMPXCHG_H | ||
3 | |||
4 | #include <asm/barrier.h> | ||
5 | |||
6 | #if defined(CONFIG_METAG_ATOMICITY_IRQSOFF) | ||
7 | #include <asm/cmpxchg_irq.h> | ||
8 | #elif defined(CONFIG_METAG_ATOMICITY_LOCK1) | ||
9 | #include <asm/cmpxchg_lock1.h> | ||
10 | #elif defined(CONFIG_METAG_ATOMICITY_LNKGET) | ||
11 | #include <asm/cmpxchg_lnkget.h> | ||
12 | #endif | ||
13 | |||
14 | extern void __xchg_called_with_bad_pointer(void); | ||
15 | |||
16 | #define __xchg(ptr, x, size) \ | ||
17 | ({ \ | ||
18 | unsigned long __xchg__res; \ | ||
19 | volatile void *__xchg_ptr = (ptr); \ | ||
20 | switch (size) { \ | ||
21 | case 4: \ | ||
22 | __xchg__res = xchg_u32(__xchg_ptr, x); \ | ||
23 | break; \ | ||
24 | case 1: \ | ||
25 | __xchg__res = xchg_u8(__xchg_ptr, x); \ | ||
26 | break; \ | ||
27 | default: \ | ||
28 | __xchg_called_with_bad_pointer(); \ | ||
29 | __xchg__res = x; \ | ||
30 | break; \ | ||
31 | } \ | ||
32 | \ | ||
33 | __xchg__res; \ | ||
34 | }) | ||
35 | |||
36 | #define xchg(ptr, x) \ | ||
37 | ((__typeof__(*(ptr)))__xchg((ptr), (unsigned long)(x), sizeof(*(ptr)))) | ||
38 | |||
39 | /* This function doesn't exist, so you'll get a linker error | ||
40 | * if something tries to do an invalid cmpxchg(). */ | ||
41 | extern void __cmpxchg_called_with_bad_pointer(void); | ||
42 | |||
43 | static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | ||
44 | unsigned long new, int size) | ||
45 | { | ||
46 | switch (size) { | ||
47 | case 4: | ||
48 | return __cmpxchg_u32(ptr, old, new); | ||
49 | } | ||
50 | __cmpxchg_called_with_bad_pointer(); | ||
51 | return old; | ||
52 | } | ||
53 | |||
54 | #define __HAVE_ARCH_CMPXCHG 1 | ||
55 | |||
56 | #define cmpxchg(ptr, o, n) \ | ||
57 | ({ \ | ||
58 | __typeof__(*(ptr)) _o_ = (o); \ | ||
59 | __typeof__(*(ptr)) _n_ = (n); \ | ||
60 | (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ | ||
61 | (unsigned long)_n_, \ | ||
62 | sizeof(*(ptr))); \ | ||
63 | }) | ||
64 | |||
65 | #endif /* __ASM_METAG_CMPXCHG_H */ | ||
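A hypothetical usage sketch of the cmpxchg() macro above (not part of the patch): a lock-free, saturating counter increment built from a compare-and-swap retry loop.

    /* Hypothetical usage sketch -- not part of the patch. */
    #include <asm/cmpxchg.h>

    /* Increment *p atomically, but never past 'max'. */
    static void saturating_inc(unsigned int *p, unsigned int max)
    {
            unsigned int old, new_val;

            do {
                    old = *p;
                    if (old == max)
                            return;                  /* already saturated */
                    new_val = old + 1;
            } while (cmpxchg(p, old, new_val) != old);
    }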
diff --git a/arch/metag/include/asm/cmpxchg_irq.h b/arch/metag/include/asm/cmpxchg_irq.h
new file mode 100644
index 000000000000..649573168b05
--- /dev/null
+++ b/arch/metag/include/asm/cmpxchg_irq.h
@@ -0,0 +1,42 @@
1 | #ifndef __ASM_METAG_CMPXCHG_IRQ_H | ||
2 | #define __ASM_METAG_CMPXCHG_IRQ_H | ||
3 | |||
4 | #include <linux/irqflags.h> | ||
5 | |||
6 | static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) | ||
7 | { | ||
8 | unsigned long flags, retval; | ||
9 | |||
10 | local_irq_save(flags); | ||
11 | retval = *m; | ||
12 | *m = val; | ||
13 | local_irq_restore(flags); | ||
14 | return retval; | ||
15 | } | ||
16 | |||
17 | static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val) | ||
18 | { | ||
19 | unsigned long flags, retval; | ||
20 | |||
21 | local_irq_save(flags); | ||
22 | retval = *m; | ||
23 | *m = val & 0xff; | ||
24 | local_irq_restore(flags); | ||
25 | return retval; | ||
26 | } | ||
27 | |||
28 | static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old, | ||
29 | unsigned long new) | ||
30 | { | ||
31 | __u32 retval; | ||
32 | unsigned long flags; | ||
33 | |||
34 | local_irq_save(flags); | ||
35 | retval = *m; | ||
36 | if (retval == old) | ||
37 | *m = new; | ||
38 | local_irq_restore(flags); /* implies memory barrier */ | ||
39 | return retval; | ||
40 | } | ||
41 | |||
42 | #endif /* __ASM_METAG_CMPXCHG_IRQ_H */ | ||
diff --git a/arch/metag/include/asm/cmpxchg_lnkget.h b/arch/metag/include/asm/cmpxchg_lnkget.h
new file mode 100644
index 000000000000..0154e2807ebb
--- /dev/null
+++ b/arch/metag/include/asm/cmpxchg_lnkget.h
@@ -0,0 +1,86 @@
1 | #ifndef __ASM_METAG_CMPXCHG_LNKGET_H | ||
2 | #define __ASM_METAG_CMPXCHG_LNKGET_H | ||
3 | |||
4 | static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) | ||
5 | { | ||
6 | int temp, old; | ||
7 | |||
8 | smp_mb(); | ||
9 | |||
10 | asm volatile ( | ||
11 | "1: LNKGETD %1, [%2]\n" | ||
12 | " LNKSETD [%2], %3\n" | ||
13 | " DEFR %0, TXSTAT\n" | ||
14 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
15 | " CMPT %0, #HI(0x02000000)\n" | ||
16 | " BNZ 1b\n" | ||
17 | #ifdef CONFIG_METAG_LNKGET_AROUND_CACHE | ||
18 | " DCACHE [%2], %0\n" | ||
19 | #endif | ||
20 | : "=&d" (temp), "=&d" (old) | ||
21 | : "da" (m), "da" (val) | ||
22 | : "cc" | ||
23 | ); | ||
24 | |||
25 | smp_mb(); | ||
26 | |||
27 | return old; | ||
28 | } | ||
29 | |||
30 | static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val) | ||
31 | { | ||
32 | int temp, old; | ||
33 | |||
34 | smp_mb(); | ||
35 | |||
36 | asm volatile ( | ||
37 | "1: LNKGETD %1, [%2]\n" | ||
38 | " LNKSETD [%2], %3\n" | ||
39 | " DEFR %0, TXSTAT\n" | ||
40 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
41 | " CMPT %0, #HI(0x02000000)\n" | ||
42 | " BNZ 1b\n" | ||
43 | #ifdef CONFIG_METAG_LNKGET_AROUND_CACHE | ||
44 | " DCACHE [%2], %0\n" | ||
45 | #endif | ||
46 | : "=&d" (temp), "=&d" (old) | ||
47 | : "da" (m), "da" (val & 0xff) | ||
48 | : "cc" | ||
49 | ); | ||
50 | |||
51 | smp_mb(); | ||
52 | |||
53 | return old; | ||
54 | } | ||
55 | |||
56 | static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old, | ||
57 | unsigned long new) | ||
58 | { | ||
59 | __u32 retval, temp; | ||
60 | |||
61 | smp_mb(); | ||
62 | |||
63 | asm volatile ( | ||
64 | "1: LNKGETD %1, [%2]\n" | ||
65 | " CMP %1, %3\n" | ||
66 | " LNKSETDEQ [%2], %4\n" | ||
67 | " BNE 2f\n" | ||
68 | " DEFR %0, TXSTAT\n" | ||
69 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
70 | " CMPT %0, #HI(0x02000000)\n" | ||
71 | " BNZ 1b\n" | ||
72 | #ifdef CONFIG_METAG_LNKGET_AROUND_CACHE | ||
73 | " DCACHE [%2], %0\n" | ||
74 | #endif | ||
75 | "2:\n" | ||
76 | : "=&d" (temp), "=&da" (retval) | ||
77 | : "da" (m), "bd" (old), "da" (new) | ||
78 | : "cc" | ||
79 | ); | ||
80 | |||
81 | smp_mb(); | ||
82 | |||
83 | return retval; | ||
84 | } | ||
85 | |||
86 | #endif /* __ASM_METAG_CMPXCHG_LNKGET_H */ | ||
diff --git a/arch/metag/include/asm/cmpxchg_lock1.h b/arch/metag/include/asm/cmpxchg_lock1.h
new file mode 100644
index 000000000000..fd6850474969
--- /dev/null
+++ b/arch/metag/include/asm/cmpxchg_lock1.h
@@ -0,0 +1,48 @@
1 | #ifndef __ASM_METAG_CMPXCHG_LOCK1_H | ||
2 | #define __ASM_METAG_CMPXCHG_LOCK1_H | ||
3 | |||
4 | #include <asm/global_lock.h> | ||
5 | |||
6 | /* Use LOCK2 as these have to be atomic w.r.t. ordinary accesses. */ | ||
7 | |||
8 | static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) | ||
9 | { | ||
10 | unsigned long flags, retval; | ||
11 | |||
12 | __global_lock2(flags); | ||
13 | fence(); | ||
14 | retval = *m; | ||
15 | *m = val; | ||
16 | __global_unlock2(flags); | ||
17 | return retval; | ||
18 | } | ||
19 | |||
20 | static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val) | ||
21 | { | ||
22 | unsigned long flags, retval; | ||
23 | |||
24 | __global_lock2(flags); | ||
25 | fence(); | ||
26 | retval = *m; | ||
27 | *m = val & 0xff; | ||
28 | __global_unlock2(flags); | ||
29 | return retval; | ||
30 | } | ||
31 | |||
32 | static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old, | ||
33 | unsigned long new) | ||
34 | { | ||
35 | __u32 retval; | ||
36 | unsigned long flags; | ||
37 | |||
38 | __global_lock2(flags); | ||
39 | retval = *m; | ||
40 | if (retval == old) { | ||
41 | fence(); | ||
42 | *m = new; | ||
43 | } | ||
44 | __global_unlock2(flags); | ||
45 | return retval; | ||
46 | } | ||
47 | |||
48 | #endif /* __ASM_METAG_CMPXCHG_LOCK1_H */ | ||
diff --git a/arch/metag/include/asm/global_lock.h b/arch/metag/include/asm/global_lock.h
new file mode 100644
index 000000000000..fc831c88c22a
--- /dev/null
+++ b/arch/metag/include/asm/global_lock.h
@@ -0,0 +1,100 @@
1 | #ifndef __ASM_METAG_GLOBAL_LOCK_H | ||
2 | #define __ASM_METAG_GLOBAL_LOCK_H | ||
3 | |||
4 | #include <asm/metag_mem.h> | ||
5 | |||
6 | /** | ||
7 | * __global_lock1() - Acquire global voluntary lock (LOCK1). | ||
8 | * @flags: Variable to store flags into. | ||
9 | * | ||
10 | * Acquires the Meta global voluntary lock (LOCK1), also taking care to disable | ||
11 | * all triggers so we cannot be interrupted, and to enforce a compiler barrier | ||
12 | * so that the compiler cannot reorder memory accesses across the lock. | ||
13 | * | ||
14 | * No other hardware thread will be able to acquire the voluntary or exclusive | ||
15 | * locks until the voluntary lock is released with @__global_unlock1, but they | ||
16 | * may continue to execute as long as they aren't trying to acquire either of | ||
17 | * the locks. | ||
18 | */ | ||
19 | #define __global_lock1(flags) do { \ | ||
20 | unsigned int __trval; \ | ||
21 | asm volatile("MOV %0,#0\n\t" \ | ||
22 | "SWAP %0,TXMASKI\n\t" \ | ||
23 | "LOCK1" \ | ||
24 | : "=r" (__trval) \ | ||
25 | : \ | ||
26 | : "memory"); \ | ||
27 | (flags) = __trval; \ | ||
28 | } while (0) | ||
29 | |||
30 | /** | ||
31 | * __global_unlock1() - Release global voluntary lock (LOCK1). | ||
32 | * @flags: Variable to restore flags from. | ||
33 | * | ||
34 | * Releases the Meta global voluntary lock (LOCK1) acquired with | ||
35 | * @__global_lock1, also taking care to re-enable triggers, and to enforce a | ||
36 | * compiler barrier so that the compiler cannot reorder memory accesses across | ||
37 | * the unlock. | ||
38 | * | ||
39 | * This immediately allows another hardware thread to acquire the voluntary or | ||
40 | * exclusive locks. | ||
41 | */ | ||
42 | #define __global_unlock1(flags) do { \ | ||
43 | unsigned int __trval = (flags); \ | ||
44 | asm volatile("LOCK0\n\t" \ | ||
45 | "MOV TXMASKI,%0" \ | ||
46 | : \ | ||
47 | : "r" (__trval) \ | ||
48 | : "memory"); \ | ||
49 | } while (0) | ||
50 | |||
51 | /** | ||
52 | * __global_lock2() - Acquire global exclusive lock (LOCK2). | ||
53 | * @flags: Variable to store flags into. | ||
54 | * | ||
55 | * Acquires the Meta global voluntary lock and global exclusive lock (LOCK2), | ||
56 | * also taking care to disable all triggers so we cannot be interrupted, to take | ||
57 | * the atomic lock (system event) and to enforce a compiler barrier so that the | ||
58 | * compiler cannot reorder memory accesses across the lock. | ||
59 | * | ||
60 | * No other hardware thread will be able to execute code until the locks are | ||
61 | * released with @__global_unlock2. | ||
62 | */ | ||
63 | #define __global_lock2(flags) do { \ | ||
64 | unsigned int __trval; \ | ||
65 | unsigned int __aloc_hi = LINSYSEVENT_WR_ATOMIC_LOCK & 0xFFFF0000; \ | ||
66 | asm volatile("MOV %0,#0\n\t" \ | ||
67 | "SWAP %0,TXMASKI\n\t" \ | ||
68 | "LOCK2\n\t" \ | ||
69 | "SETD [%1+#0x40],D1RtP" \ | ||
70 | : "=r&" (__trval) \ | ||
71 | : "u" (__aloc_hi) \ | ||
72 | : "memory"); \ | ||
73 | (flags) = __trval; \ | ||
74 | } while (0) | ||
75 | |||
76 | /** | ||
77 | * __global_unlock2() - Release global exclusive lock (LOCK2). | ||
78 | * @flags: Variable to restore flags from. | ||
79 | * | ||
80 | * Releases the Meta global exclusive lock (LOCK2) and global voluntary lock | ||
81 | * acquired with @__global_lock2, also taking care to release the atomic lock | ||
82 | * (system event), re-enable triggers, and to enforce a compiler barrier so that | ||
83 | * the compiler cannot reorder memory accesses across the unlock. | ||
84 | * | ||
85 | * This immediately allows other hardware threads to continue executing and one | ||
86 | * of them to acquire locks. | ||
87 | */ | ||
88 | #define __global_unlock2(flags) do { \ | ||
89 | unsigned int __trval = (flags); \ | ||
90 | unsigned int __alock_hi = LINSYSEVENT_WR_ATOMIC_LOCK & 0xFFFF0000; \ | ||
91 | asm volatile("SETD [%1+#0x00],D1RtP\n\t" \ | ||
92 | "LOCK0\n\t" \ | ||
93 | "MOV TXMASKI,%0" \ | ||
94 | : \ | ||
95 | : "r" (__trval), \ | ||
96 | "u" (__alock_hi) \ | ||
97 | : "memory"); \ | ||
98 | } while (0) | ||
99 | |||
100 | #endif /* __ASM_METAG_GLOBAL_LOCK_H */ | ||
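For orientation (not part of the patch), this is the critical-section pattern the *_lock1 headers in this series build on top of __global_lock1()/__global_unlock1(). fence() is assumed to be the write fence used the same way by the lock1 atomics above.

    /* Usage sketch -- not part of the patch; mirrors atomic_lock1.h above. */
    #include <asm/barrier.h>        /* assumed provider of fence(), as in atomic_lock1.h */
    #include <asm/global_lock.h>

    static int shared_counter;

    static int counter_add_return(int i)
    {
            unsigned long flags;
            int result;

            __global_lock1(flags);           /* voluntary lock, triggers masked */
            result = shared_counter + i;
            fence();                         /* write fence, as used by the lock1 atomics */
            shared_counter = result;
            __global_unlock1(flags);         /* LOCK0 + restore TXMASKI */

            return result;
    }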
diff --git a/arch/metag/include/asm/spinlock.h b/arch/metag/include/asm/spinlock.h
new file mode 100644
index 000000000000..86a7cf3d1386
--- /dev/null
+++ b/arch/metag/include/asm/spinlock.h
@@ -0,0 +1,22 @@
1 | #ifndef __ASM_SPINLOCK_H | ||
2 | #define __ASM_SPINLOCK_H | ||
3 | |||
4 | #ifdef CONFIG_METAG_ATOMICITY_LOCK1 | ||
5 | #include <asm/spinlock_lock1.h> | ||
6 | #else | ||
7 | #include <asm/spinlock_lnkget.h> | ||
8 | #endif | ||
9 | |||
10 | #define arch_spin_unlock_wait(lock) \ | ||
11 | do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) | ||
12 | |||
13 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) | ||
14 | |||
15 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
16 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
17 | |||
18 | #define arch_spin_relax(lock) cpu_relax() | ||
19 | #define arch_read_relax(lock) cpu_relax() | ||
20 | #define arch_write_relax(lock) cpu_relax() | ||
21 | |||
22 | #endif /* __ASM_SPINLOCK_H */ | ||
diff --git a/arch/metag/include/asm/spinlock_lnkget.h b/arch/metag/include/asm/spinlock_lnkget.h
new file mode 100644
index 000000000000..ad8436feed8d
--- /dev/null
+++ b/arch/metag/include/asm/spinlock_lnkget.h
@@ -0,0 +1,249 @@
1 | #ifndef __ASM_SPINLOCK_LNKGET_H | ||
2 | #define __ASM_SPINLOCK_LNKGET_H | ||
3 | |||
4 | /* | ||
5 | * None of these asm statements clobber memory as LNKSET writes around | ||
6 | * the cache so the memory it modifies cannot safely be read by any means | ||
7 | * other than these accessors. | ||
8 | */ | ||
9 | |||
10 | static inline int arch_spin_is_locked(arch_spinlock_t *lock) | ||
11 | { | ||
12 | int ret; | ||
13 | |||
14 | asm volatile ("LNKGETD %0, [%1]\n" | ||
15 | "TST %0, #1\n" | ||
16 | "MOV %0, #1\n" | ||
17 | "XORZ %0, %0, %0\n" | ||
18 | : "=&d" (ret) | ||
19 | : "da" (&lock->lock) | ||
20 | : "cc"); | ||
21 | return ret; | ||
22 | } | ||
23 | |||
24 | static inline void arch_spin_lock(arch_spinlock_t *lock) | ||
25 | { | ||
26 | int tmp; | ||
27 | |||
28 | asm volatile ("1: LNKGETD %0,[%1]\n" | ||
29 | " TST %0, #1\n" | ||
30 | " ADD %0, %0, #1\n" | ||
31 | " LNKSETDZ [%1], %0\n" | ||
32 | " BNZ 1b\n" | ||
33 | " DEFR %0, TXSTAT\n" | ||
34 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
35 | " CMPT %0, #HI(0x02000000)\n" | ||
36 | " BNZ 1b\n" | ||
37 | : "=&d" (tmp) | ||
38 | : "da" (&lock->lock) | ||
39 | : "cc"); | ||
40 | |||
41 | smp_mb(); | ||
42 | } | ||
43 | |||
44 | /* Returns 0 if failed to acquire lock */ | ||
45 | static inline int arch_spin_trylock(arch_spinlock_t *lock) | ||
46 | { | ||
47 | int tmp; | ||
48 | |||
49 | asm volatile (" LNKGETD %0,[%1]\n" | ||
50 | " TST %0, #1\n" | ||
51 | " ADD %0, %0, #1\n" | ||
52 | " LNKSETDZ [%1], %0\n" | ||
53 | " BNZ 1f\n" | ||
54 | " DEFR %0, TXSTAT\n" | ||
55 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
56 | " CMPT %0, #HI(0x02000000)\n" | ||
57 | " MOV %0, #1\n" | ||
58 | "1: XORNZ %0, %0, %0\n" | ||
59 | : "=&d" (tmp) | ||
60 | : "da" (&lock->lock) | ||
61 | : "cc"); | ||
62 | |||
63 | smp_mb(); | ||
64 | |||
65 | return tmp; | ||
66 | } | ||
67 | |||
68 | static inline void arch_spin_unlock(arch_spinlock_t *lock) | ||
69 | { | ||
70 | smp_mb(); | ||
71 | |||
72 | asm volatile (" SETD [%0], %1\n" | ||
73 | : | ||
74 | : "da" (&lock->lock), "da" (0) | ||
75 | : "memory"); | ||
76 | } | ||
77 | |||
78 | /* | ||
79 | * RWLOCKS | ||
80 | * | ||
81 | * | ||
82 | * Write locks are easy - we just set bit 31. When unlocking, we can | ||
83 | * just write zero since the lock is exclusively held. | ||
84 | */ | ||
85 | |||
86 | static inline void arch_write_lock(arch_rwlock_t *rw) | ||
87 | { | ||
88 | int tmp; | ||
89 | |||
90 | asm volatile ("1: LNKGETD %0,[%1]\n" | ||
91 | " CMP %0, #0\n" | ||
92 | " ADD %0, %0, %2\n" | ||
93 | " LNKSETDZ [%1], %0\n" | ||
94 | " BNZ 1b\n" | ||
95 | " DEFR %0, TXSTAT\n" | ||
96 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
97 | " CMPT %0, #HI(0x02000000)\n" | ||
98 | " BNZ 1b\n" | ||
99 | : "=&d" (tmp) | ||
100 | : "da" (&rw->lock), "bd" (0x80000000) | ||
101 | : "cc"); | ||
102 | |||
103 | smp_mb(); | ||
104 | } | ||
105 | |||
106 | static inline int arch_write_trylock(arch_rwlock_t *rw) | ||
107 | { | ||
108 | int tmp; | ||
109 | |||
110 | asm volatile (" LNKGETD %0,[%1]\n" | ||
111 | " CMP %0, #0\n" | ||
112 | " ADD %0, %0, %2\n" | ||
113 | " LNKSETDZ [%1], %0\n" | ||
114 | " BNZ 1f\n" | ||
115 | " DEFR %0, TXSTAT\n" | ||
116 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
117 | " CMPT %0, #HI(0x02000000)\n" | ||
118 | " MOV %0,#1\n" | ||
119 | "1: XORNZ %0, %0, %0\n" | ||
120 | : "=&d" (tmp) | ||
121 | : "da" (&rw->lock), "bd" (0x80000000) | ||
122 | : "cc"); | ||
123 | |||
124 | smp_mb(); | ||
125 | |||
126 | return tmp; | ||
127 | } | ||
128 | |||
129 | static inline void arch_write_unlock(arch_rwlock_t *rw) | ||
130 | { | ||
131 | smp_mb(); | ||
132 | |||
133 | asm volatile (" SETD [%0], %1\n" | ||
134 | : | ||
135 | : "da" (&rw->lock), "da" (0) | ||
136 | : "memory"); | ||
137 | } | ||
138 | |||
139 | /* write_can_lock - would write_trylock() succeed? */ | ||
140 | static inline int arch_write_can_lock(arch_rwlock_t *rw) | ||
141 | { | ||
142 | int ret; | ||
143 | |||
144 | asm volatile ("LNKGETD %0, [%1]\n" | ||
145 | "CMP %0, #0\n" | ||
146 | "MOV %0, #1\n" | ||
147 | "XORNZ %0, %0, %0\n" | ||
148 | : "=&d" (ret) | ||
149 | : "da" (&rw->lock) | ||
150 | : "cc"); | ||
151 | return ret; | ||
152 | } | ||
153 | |||
154 | /* | ||
155 | * Read locks are a bit more hairy: | ||
156 | * - Exclusively load the lock value. | ||
157 | * - Increment it. | ||
158 | * - Store new lock value if positive, and we still own this location. | ||
159 | * If the value is negative, we've already failed. | ||
160 | * - If we failed to store the value, we want a negative result. | ||
161 | * - If we failed, try again. | ||
162 | * Unlocking is similarly hairy. We may have multiple read locks | ||
163 | * currently active. However, we know we won't have any write | ||
164 | * locks. | ||
165 | */ | ||
166 | static inline void arch_read_lock(arch_rwlock_t *rw) | ||
167 | { | ||
168 | int tmp; | ||
169 | |||
170 | asm volatile ("1: LNKGETD %0,[%1]\n" | ||
171 | " ADDS %0, %0, #1\n" | ||
172 | " LNKSETDPL [%1], %0\n" | ||
173 | " BMI 1b\n" | ||
174 | " DEFR %0, TXSTAT\n" | ||
175 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
176 | " CMPT %0, #HI(0x02000000)\n" | ||
177 | " BNZ 1b\n" | ||
178 | : "=&d" (tmp) | ||
179 | : "da" (&rw->lock) | ||
180 | : "cc"); | ||
181 | |||
182 | smp_mb(); | ||
183 | } | ||
184 | |||
185 | static inline void arch_read_unlock(arch_rwlock_t *rw) | ||
186 | { | ||
187 | int tmp; | ||
188 | |||
189 | smp_mb(); | ||
190 | |||
191 | asm volatile ("1: LNKGETD %0,[%1]\n" | ||
192 | " SUB %0, %0, #1\n" | ||
193 | " LNKSETD [%1], %0\n" | ||
194 | " DEFR %0, TXSTAT\n" | ||
195 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
196 | " CMPT %0, #HI(0x02000000)\n" | ||
197 | " BNZ 1b\n" | ||
198 | : "=&d" (tmp) | ||
199 | : "da" (&rw->lock) | ||
200 | : "cc", "memory"); | ||
201 | } | ||
202 | |||
203 | static inline int arch_read_trylock(arch_rwlock_t *rw) | ||
204 | { | ||
205 | int tmp; | ||
206 | |||
207 | asm volatile (" LNKGETD %0,[%1]\n" | ||
208 | " ADDS %0, %0, #1\n" | ||
209 | " LNKSETDPL [%1], %0\n" | ||
210 | " BMI 1f\n" | ||
211 | " DEFR %0, TXSTAT\n" | ||
212 | " ANDT %0, %0, #HI(0x3f000000)\n" | ||
213 | " CMPT %0, #HI(0x02000000)\n" | ||
214 | " MOV %0,#1\n" | ||
215 | " BZ 2f\n" | ||
216 | "1: MOV %0,#0\n" | ||
217 | "2:\n" | ||
218 | : "=&d" (tmp) | ||
219 | : "da" (&rw->lock) | ||
220 | : "cc"); | ||
221 | |||
222 | smp_mb(); | ||
223 | |||
224 | return tmp; | ||
225 | } | ||
226 | |||
227 | /* read_can_lock - would read_trylock() succeed? */ | ||
228 | static inline int arch_read_can_lock(arch_rwlock_t *rw) | ||
229 | { | ||
230 | int tmp; | ||
231 | |||
232 | asm volatile ("LNKGETD %0, [%1]\n" | ||
233 | "CMP %0, %2\n" | ||
234 | "MOV %0, #1\n" | ||
235 | "XORZ %0, %0, %0\n" | ||
236 | : "=&d" (tmp) | ||
237 | : "da" (&rw->lock), "bd" (0x80000000) | ||
238 | : "cc"); | ||
239 | return tmp; | ||
240 | } | ||
241 | |||
242 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
243 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
244 | |||
245 | #define arch_spin_relax(lock) cpu_relax() | ||
246 | #define arch_read_relax(lock) cpu_relax() | ||
247 | #define arch_write_relax(lock) cpu_relax() | ||
248 | |||
249 | #endif /* __ASM_SPINLOCK_LNKGET_H */ | ||
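Illustrative sketch (not part of the patch) of how the rwlock word above is interpreted: a writer sets bit 31 (making the value negative), while readers simply count upwards from zero.

    /* Illustrative decode of the rwlock word -- not part of the patch. */
    #define RW_WRITE_LOCKED 0x80000000u     /* bit 31: held by a writer */

    static inline int rw_word_write_locked(unsigned int lockval)
    {
            return (lockval & RW_WRITE_LOCKED) != 0;
    }

    static inline unsigned int rw_word_readers(unsigned int lockval)
    {
            /* With no writer present, the word is just the count of active readers. */
            return rw_word_write_locked(lockval) ? 0 : lockval;
    }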
diff --git a/arch/metag/include/asm/spinlock_lock1.h b/arch/metag/include/asm/spinlock_lock1.h
new file mode 100644
index 000000000000..c630444cffe9
--- /dev/null
+++ b/arch/metag/include/asm/spinlock_lock1.h
@@ -0,0 +1,184 @@
1 | #ifndef __ASM_SPINLOCK_LOCK1_H | ||
2 | #define __ASM_SPINLOCK_LOCK1_H | ||
3 | |||
4 | #include <asm/bug.h> | ||
5 | #include <asm/global_lock.h> | ||
6 | |||
7 | static inline int arch_spin_is_locked(arch_spinlock_t *lock) | ||
8 | { | ||
9 | int ret; | ||
10 | |||
11 | barrier(); | ||
12 | ret = lock->lock; | ||
13 | WARN_ON(ret != 0 && ret != 1); | ||
14 | return ret; | ||
15 | } | ||
16 | |||
17 | static inline void arch_spin_lock(arch_spinlock_t *lock) | ||
18 | { | ||
19 | unsigned int we_won = 0; | ||
20 | unsigned long flags; | ||
21 | |||
22 | again: | ||
23 | __global_lock1(flags); | ||
24 | if (lock->lock == 0) { | ||
25 | fence(); | ||
26 | lock->lock = 1; | ||
27 | we_won = 1; | ||
28 | } | ||
29 | __global_unlock1(flags); | ||
30 | if (we_won == 0) | ||
31 | goto again; | ||
32 | WARN_ON(lock->lock != 1); | ||
33 | } | ||
34 | |||
35 | /* Returns 0 if failed to acquire lock */ | ||
36 | static inline int arch_spin_trylock(arch_spinlock_t *lock) | ||
37 | { | ||
38 | unsigned long flags; | ||
39 | unsigned int ret; | ||
40 | |||
41 | __global_lock1(flags); | ||
42 | ret = lock->lock; | ||
43 | if (ret == 0) { | ||
44 | fence(); | ||
45 | lock->lock = 1; | ||
46 | } | ||
47 | __global_unlock1(flags); | ||
48 | return (ret == 0); | ||
49 | } | ||
50 | |||
51 | static inline void arch_spin_unlock(arch_spinlock_t *lock) | ||
52 | { | ||
53 | barrier(); | ||
54 | WARN_ON(!lock->lock); | ||
55 | lock->lock = 0; | ||
56 | } | ||
57 | |||
58 | /* | ||
59 | * RWLOCKS | ||
60 | * | ||
61 | * | ||
62 | * Write locks are easy - we just set bit 31. When unlocking, we can | ||
63 | * just write zero since the lock is exclusively held. | ||
64 | */ | ||
65 | |||
66 | static inline void arch_write_lock(arch_rwlock_t *rw) | ||
67 | { | ||
68 | unsigned long flags; | ||
69 | unsigned int we_won = 0; | ||
70 | |||
71 | again: | ||
72 | __global_lock1(flags); | ||
73 | if (rw->lock == 0) { | ||
74 | fence(); | ||
75 | rw->lock = 0x80000000; | ||
76 | we_won = 1; | ||
77 | } | ||
78 | __global_unlock1(flags); | ||
79 | if (we_won == 0) | ||
80 | goto again; | ||
81 | WARN_ON(rw->lock != 0x80000000); | ||
82 | } | ||
83 | |||
84 | static inline int arch_write_trylock(arch_rwlock_t *rw) | ||
85 | { | ||
86 | unsigned long flags; | ||
87 | unsigned int ret; | ||
88 | |||
89 | __global_lock1(flags); | ||
90 | ret = rw->lock; | ||
91 | if (ret == 0) { | ||
92 | fence(); | ||
93 | rw->lock = 0x80000000; | ||
94 | } | ||
95 | __global_unlock1(flags); | ||
96 | |||
97 | return (ret == 0); | ||
98 | } | ||
99 | |||
100 | static inline void arch_write_unlock(arch_rwlock_t *rw) | ||
101 | { | ||
102 | barrier(); | ||
103 | WARN_ON(rw->lock != 0x80000000); | ||
104 | rw->lock = 0; | ||
105 | } | ||
106 | |||
107 | /* write_can_lock - would write_trylock() succeed? */ | ||
108 | static inline int arch_write_can_lock(arch_rwlock_t *rw) | ||
109 | { | ||
110 | unsigned int ret; | ||
111 | |||
112 | barrier(); | ||
113 | ret = rw->lock; | ||
114 | return (ret == 0); | ||
115 | } | ||
116 | |||
117 | /* | ||
118 | * Read locks are a bit more hairy: | ||
119 | * - Exclusively load the lock value. | ||
120 | * - Increment it. | ||
121 | * - Store new lock value if positive, and we still own this location. | ||
122 | * If the value is negative, we've already failed. | ||
123 | * - If we failed to store the value, we want a negative result. | ||
124 | * - If we failed, try again. | ||
125 | * Unlocking is similarly hairy. We may have multiple read locks | ||
126 | * currently active. However, we know we won't have any write | ||
127 | * locks. | ||
128 | */ | ||
129 | static inline void arch_read_lock(arch_rwlock_t *rw) | ||
130 | { | ||
131 | unsigned long flags; | ||
132 | unsigned int we_won = 0, ret; | ||
133 | |||
134 | again: | ||
135 | __global_lock1(flags); | ||
136 | ret = rw->lock; | ||
137 | if (ret < 0x80000000) { | ||
138 | fence(); | ||
139 | rw->lock = ret + 1; | ||
140 | we_won = 1; | ||
141 | } | ||
142 | __global_unlock1(flags); | ||
143 | if (!we_won) | ||
144 | goto again; | ||
145 | } | ||
146 | |||
147 | static inline void arch_read_unlock(arch_rwlock_t *rw) | ||
148 | { | ||
149 | unsigned long flags; | ||
150 | unsigned int ret; | ||
151 | |||
152 | __global_lock1(flags); | ||
153 | fence(); | ||
154 | ret = rw->lock--; | ||
155 | __global_unlock1(flags); | ||
156 | WARN_ON(ret == 0); | ||
157 | } | ||
158 | |||
159 | static inline int arch_read_trylock(arch_rwlock_t *rw) | ||
160 | { | ||
161 | unsigned long flags; | ||
162 | unsigned int ret; | ||
163 | |||
164 | __global_lock1(flags); | ||
165 | ret = rw->lock; | ||
166 | if (ret < 0x80000000) { | ||
167 | fence(); | ||
168 | rw->lock = ret + 1; | ||
169 | } | ||
170 | __global_unlock1(flags); | ||
171 | return (ret < 0x80000000); | ||
172 | } | ||
173 | |||
174 | /* read_can_lock - would read_trylock() succeed? */ | ||
175 | static inline int arch_read_can_lock(arch_rwlock_t *rw) | ||
176 | { | ||
177 | unsigned int ret; | ||
178 | |||
179 | barrier(); | ||
180 | ret = rw->lock; | ||
181 | return (ret < 0x80000000); | ||
182 | } | ||
183 | |||
184 | #endif /* __ASM_SPINLOCK_LOCK1_H */ | ||
diff --git a/arch/metag/include/asm/spinlock_types.h b/arch/metag/include/asm/spinlock_types.h
new file mode 100644
index 000000000000..b76391405fea
--- /dev/null
+++ b/arch/metag/include/asm/spinlock_types.h
@@ -0,0 +1,20 @@
1 | #ifndef _ASM_METAG_SPINLOCK_TYPES_H | ||
2 | #define _ASM_METAG_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned int lock; | ||
10 | } arch_spinlock_t; | ||
11 | |||
12 | #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile unsigned int lock; | ||
16 | } arch_rwlock_t; | ||
17 | |||
18 | #define __ARCH_RW_LOCK_UNLOCKED { 0 } | ||
19 | |||
20 | #endif /* _ASM_METAG_SPINLOCK_TYPES_H */ | ||