author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2008-08-02 05:55:55 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2008-08-02 16:32:35 -0400
commit		4baa9922430662431231ac637adedddbb0cfb2d7 (patch)
tree		e8fb765ce3e41c01f33de34a0bc9494f0ae19818 /include/asm-arm/spinlock.h
parent		ff4db0a043a5dee7180bdffd178e61cd02812c68 (diff)
[ARM] move include/asm-arm to arch/arm/include/asm
Move platform independent header files to arch/arm/include/asm, leaving
those in asm/arch* and asm/plat* alone.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'include/asm-arm/spinlock.h')
-rw-r--r--	include/asm-arm/spinlock.h | 224
1 file changed, 0 insertions(+), 224 deletions(-)
diff --git a/include/asm-arm/spinlock.h b/include/asm-arm/spinlock.h
deleted file mode 100644
index 2b41ebbfa7ff..000000000000
--- a/include/asm-arm/spinlock.h
+++ /dev/null
@@ -1,224 +0,0 @@
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value.  If it is zero, we may have
 * won the lock, so we try exclusively storing it.  A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */

#define __raw_spin_is_locked(x)		((x)->lock != 0)
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
#ifdef CONFIG_CPU_32v6K
"	wfene\n"
#endif
"	strexeq	%0, %2, [%1]\n"
"	teqeq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	smp_mb();
}

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
"	str	%1, [%0]\n"
#ifdef CONFIG_CPU_32v6K
"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
"	sev"
#endif
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");
}
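As a rough illustration only (not part of this header or of the kernel API), the ldrex/strex loop and the smp_mb() calls above behave much like an atomic exchange with acquire ordering on lock and a release store on unlock. The sketch_* helpers below are hypothetical names and use GCC __atomic builtins purely for clarity:

/* Minimal sketch, assuming GCC __atomic builtins; not part of this header. */
static inline void sketch_spin_lock(volatile unsigned int *lock)
{
	/* Spin until we atomically change 0 -> 1; acquire ordering plays
	 * the role of the smp_mb() after the ldrex/strex loop above. */
	while (__atomic_exchange_n(lock, 1, __ATOMIC_ACQUIRE) != 0)
		;	/* a real implementation would cpu_relax() here */
}

static inline void sketch_spin_unlock(volatile unsigned int *lock)
{
	/* Release store: pairs with the acquire in sketch_spin_lock() and
	 * plays the role of the smp_mb() before the plain str above. */
	__atomic_store_n(lock, 0, __ATOMIC_RELEASE);
}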

/*
 * RWLOCKS
 *
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
#ifdef CONFIG_CPU_32v6K
"	wfene\n"
#endif
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
#ifdef CONFIG_CPU_32v6K
"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
"	sev\n"
#endif
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");
}

/* write_can_lock - would write_trylock() succeed? */
#define __raw_write_can_lock(x)		((x)->lock == 0)
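In the same illustrative spirit, the write-lock assembly above amounts to a compare-and-swap of 0 to 0x80000000 with acquire semantics, and the unlock to a release store of 0. A minimal sketch with GCC __atomic builtins follows; the sketch_* names are hypothetical and are not part of this header:

static inline int sketch_write_trylock(volatile unsigned int *lock)
{
	unsigned int expected = 0;

	/* Succeeds only when neither readers nor a writer hold the lock. */
	return __atomic_compare_exchange_n(lock, &expected, 0x80000000U,
					   0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

static inline void sketch_write_unlock(volatile unsigned int *lock)
{
	/* The writer holds the lock exclusively, so a release store of 0 suffices. */
	__atomic_store_n(lock, 0, __ATOMIC_RELEASE);
}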

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
#ifdef CONFIG_CPU_32v6K
"	wfemi\n"
#endif
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
#ifdef CONFIG_CPU_32v6K
"\n	cmp	%0, #0\n"
"	mcreq	p15, 0, %0, c7, c10, 4\n"
"	seveq"
#endif
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");
}

static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
	unsigned long tmp, tmp2 = 1;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	: "=&r" (tmp), "+r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
	return tmp2 == 0;
}

/* read_can_lock - would read_trylock() succeed? */
#define __raw_read_can_lock(x)		((x)->lock < 0x80000000)
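The reader path above (increment, store only while the result stays non-negative, retry otherwise) corresponds roughly to the following sketch; again the sketch_* names are hypothetical and the GCC __atomic builtins merely stand in for the ldrex/strex sequences:

static inline void sketch_read_lock(volatile unsigned int *lock)
{
	unsigned int old, newval;

	for (;;) {
		old = __atomic_load_n(lock, __ATOMIC_RELAXED);
		newval = old + 1;
		if ((int)newval < 0)
			continue;	/* negative: a writer holds the lock, keep spinning */
		if (__atomic_compare_exchange_n(lock, &old, newval, 0,
						__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
			break;		/* joined as one of possibly many readers */
	}
}

static inline void sketch_read_unlock(volatile unsigned int *lock)
{
	/* Drop one reader; release ordering publishes the critical section. */
	__atomic_fetch_sub(lock, 1, __ATOMIC_RELEASE);
}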

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */