author    | Paul Mundt <lethal@linux-sh.org> | 2007-08-21 21:34:31 -0400
committer | Paul Mundt <lethal@linux-sh.org> | 2007-09-20 22:57:51 -0400
commit    | 8a467a529f01c3471b195a0e8989c6177fe145ef (patch)
tree      | 0f3c5bf78acf98c28ebb36110c94d6b8dc2e836b /include/asm-sh/spinlock.h
parent    | b05d1865b46ea72c66ba082598ba370582bb590e (diff)
sh: Overhaul spinlocks and rwlocks for SH-4A SMP.
This rips out some of the old spinlock and rwlock behaviour that the SH-2
parts were using and reworks them for LL/SC semantics on the SH-4A.
This is primarily only useful for SH-X3 multi-cores, but can also be used
for building CONFIG_SMP=y testing kernels on SH-4A UP.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
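For readers without the SH-4A manual handy: movli.l/movco.l are the SH-4A load-linked/store-conditional pair, and every primitive in this patch is a retry loop built around them. Below is a rough, portable C model of what the new __raw_spin_lock/__raw_spin_unlock fast path does; it is illustrative only and not code from the patch, with a C11 compare-and-swap standing in for the LL/SC reservation.

```c
#include <stdatomic.h>

/* Lock-word convention used by the patch: 1 = unlocked, <= 0 = locked. */
typedef struct { atomic_int lock; } sketch_spinlock_t;

static inline void sketch_spin_lock(sketch_spinlock_t *l)
{
	int old;

	do {
		old = atomic_load_explicit(&l->lock, memory_order_relaxed);
		/* The asm loads with movli.l, stores 0 with movco.l (which
		 * fails if another CPU intervened), then retries until the
		 * value it observed was positive (cmp/pl).  A CAS retry loop
		 * models that reservation. */
	} while (old <= 0 ||
		 !atomic_compare_exchange_weak_explicit(&l->lock, &old, 0,
							memory_order_acquire,
							memory_order_relaxed));
}

static inline void sketch_spin_unlock(sketch_spinlock_t *l)
{
	/* __raw_spin_unlock is a plain store of 1 (mov.l). */
	atomic_store_explicit(&l->lock, 1, memory_order_release);
}
```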
Diffstat (limited to 'include/asm-sh/spinlock.h')
-rw-r--r-- | include/asm-sh/spinlock.h | 181
1 file changed, 142 insertions(+), 39 deletions(-)
diff --git a/include/asm-sh/spinlock.h b/include/asm-sh/spinlock.h
index 92f6e2008b2e..e793181d64da 100644
--- a/include/asm-sh/spinlock.h
+++ b/include/asm-sh/spinlock.h
@@ -2,6 +2,7 @@
  * include/asm-sh/spinlock.h
  *
  * Copyright (C) 2002, 2003 Paul Mundt
+ * Copyright (C) 2006, 2007 Akio Idehara
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
@@ -10,17 +11,22 @@
 #ifndef __ASM_SH_SPINLOCK_H
 #define __ASM_SH_SPINLOCK_H
 
-#include <asm/atomic.h>
-#include <asm/spinlock_types.h>
+/*
+ * The only locking implemented here uses SH-4A opcodes. For others,
+ * split this out as per atomic-*.h.
+ */
+#ifndef CONFIG_CPU_SH4A
+#error "Need movli.l/movco.l for spinlocks"
+#endif
 
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  */
 
-#define __raw_spin_is_locked(x)		((x)->lock != 0)
+#define __raw_spin_is_locked(x)		((x)->lock <= 0)
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 #define __raw_spin_unlock_wait(x) \
-	do { cpu_relax(); } while (__raw_spin_is_locked(x))
+	do { cpu_relax(); } while ((x)->lock)
 
 /*
  * Simple spin lock operations.  There are two variants, one clears IRQ's
@@ -30,12 +36,19 @@
  */
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
+	unsigned long tmp;
+	unsigned long oldval;
+
 	__asm__ __volatile__ (
-		"1:\n\t"
-		"tas.b @%0\n\t"
-		"bf/s	1b\n\t"
-		"nop\n\t"
-		: "=r" (lock->lock)
+		"1:						\n\t"
+		"movli.l	@%2, %0	! __raw_spin_lock	\n\t"
+		"mov		%0, %1				\n\t"
+		"mov		#0, %0				\n\t"
+		"movco.l	%0, @%2				\n\t"
+		"bf		1b				\n\t"
+		"cmp/pl		%1				\n\t"
+		"bf		1b				\n\t"
+		: "=&z" (tmp), "=&r" (oldval)
 		: "r" (&lock->lock)
 		: "t", "memory"
 	);
@@ -43,12 +56,36 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-	//assert_spin_locked(lock);
+	unsigned long tmp;
 
-	lock->lock = 0;
+	__asm__ __volatile__ (
+		"mov		#1, %0 ! __raw_spin_unlock	\n\t"
+		"mov.l		%0, @%1				\n\t"
+		: "=&z" (tmp)
+		: "r" (&lock->lock)
+		: "t", "memory"
+	);
 }
 
-#define __raw_spin_trylock(x) (!test_and_set_bit(0, &(x)->lock))
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+	unsigned long tmp, oldval;
+
+	__asm__ __volatile__ (
+		"1:						\n\t"
+		"movli.l	@%2, %0	! __raw_spin_trylock	\n\t"
+		"mov		%0, %1				\n\t"
+		"mov		#0, %0				\n\t"
+		"movco.l	%0, @%2				\n\t"
+		"bf		1b				\n\t"
+		"synco						\n\t"
+		: "=&z" (tmp), "=&r" (oldval)
+		: "r" (&lock->lock)
+		: "t", "memory"
+	);
+
+	return oldval;
+}
 
 /*
  * Read-write spinlocks, allowing multiple readers but only one writer.
@@ -59,58 +96,124 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  * read-locks.
  */
 
+/**
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define __raw_read_can_lock(x)	((x)->lock > 0)
+
+/**
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define __raw_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
+
 static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-	__raw_spin_lock(&rw->lock);
-
-	atomic_inc(&rw->counter);
+	unsigned long tmp;
 
-	__raw_spin_unlock(&rw->lock);
+	__asm__ __volatile__ (
+		"1:						\n\t"
+		"movli.l	@%1, %0	! __raw_read_lock	\n\t"
+		"cmp/pl		%0				\n\t"
+		"bf		1b				\n\t"
+		"add		#-1, %0				\n\t"
+		"movco.l	%0, @%1				\n\t"
+		"bf		1b				\n\t"
+		: "=&z" (tmp)
+		: "r" (&rw->lock)
+		: "t", "memory"
+	);
 }
 
 static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
-	__raw_spin_lock(&rw->lock);
-
-	atomic_dec(&rw->counter);
+	unsigned long tmp;
 
-	__raw_spin_unlock(&rw->lock);
+	__asm__ __volatile__ (
+		"1:						\n\t"
+		"movli.l	@%1, %0	! __raw_read_unlock	\n\t"
+		"add		#1, %0				\n\t"
+		"movco.l	%0, @%1				\n\t"
+		"bf		1b				\n\t"
+		: "=&z" (tmp)
+		: "r" (&rw->lock)
+		: "t", "memory"
+	);
 }
 
 static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-	__raw_spin_lock(&rw->lock);
-	atomic_set(&rw->counter, -1);
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+		"1:						\n\t"
+		"movli.l	@%1, %0	! __raw_write_lock	\n\t"
+		"cmp/hs		%2, %0				\n\t"
+		"bf		1b				\n\t"
+		"sub		%2, %0				\n\t"
+		"movco.l	%0, @%1				\n\t"
+		"bf		1b				\n\t"
+		: "=&z" (tmp)
+		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
+		: "t", "memory"
+	);
 }
 
 static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
-	atomic_set(&rw->counter, 0);
-	__raw_spin_unlock(&rw->lock);
+	__asm__ __volatile__ (
+		"mov.l		%1, @%0 ! __raw_write_unlock	\n\t"
+		:
+		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
+		: "t", "memory"
+	);
 }
 
-static inline int __raw_write_can_lock(raw_rwlock_t *rw)
+static inline int __raw_read_trylock(raw_rwlock_t *rw)
 {
-	return (atomic_read(&rw->counter) == RW_LOCK_BIAS);
-}
+	unsigned long tmp, oldval;
 
-static inline int __raw_read_trylock(raw_rwlock_t *lock)
-{
-	atomic_t *count = (atomic_t*)lock;
-	if (atomic_dec_return(count) >= 0)
-		return 1;
-	atomic_inc(count);
-	return 0;
+	__asm__ __volatile__ (
+		"1:						\n\t"
+		"movli.l	@%2, %0	! __raw_read_trylock	\n\t"
+		"mov		%0, %1				\n\t"
+		"cmp/pl		%0				\n\t"
+		"bf		2f				\n\t"
+		"add		#-1, %0				\n\t"
+		"movco.l	%0, @%2				\n\t"
+		"bf		1b				\n\t"
+		"2:						\n\t"
+		"synco						\n\t"
+		: "=&z" (tmp), "=&r" (oldval)
+		: "r" (&rw->lock)
+		: "t", "memory"
+	);
+
+	return (oldval > 0);
 }
 
 static inline int __raw_write_trylock(raw_rwlock_t *rw)
 {
-	if (atomic_sub_and_test(RW_LOCK_BIAS, &rw->counter))
-		return 1;
-
-	atomic_add(RW_LOCK_BIAS, &rw->counter);
+	unsigned long tmp, oldval;
+
+	__asm__ __volatile__ (
+		"1:						\n\t"
+		"movli.l	@%2, %0	! __raw_write_trylock	\n\t"
+		"mov		%0, %1				\n\t"
+		"cmp/hs		%3, %0				\n\t"
+		"bf		2f				\n\t"
+		"sub		%3, %0				\n\t"
+		"2:						\n\t"
+		"movco.l	%0, @%2				\n\t"
+		"bf		1b				\n\t"
+		"synco						\n\t"
+		: "=&z" (tmp), "=&r" (oldval)
+		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
+		: "t", "memory"
+	);
 
-	return 0;
+	return (oldval > (RW_LOCK_BIAS - 1));
 }
 
 #define _raw_spin_relax(lock)	cpu_relax()
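A note on the rwlock side: the new code keeps the usual RW_LOCK_BIAS counting scheme. The lock word starts at RW_LOCK_BIAS, each reader subtracts 1, and a writer subtracts the whole bias, so lock == RW_LOCK_BIAS means free, 0 < lock < RW_LOCK_BIAS means read-held, and 0 means write-held. A rough C model of that counting follows; it is illustrative only, the real bias constant lives in asm/spinlock_types.h, and the value below is just a placeholder.

```c
#include <stdatomic.h>
#include <stdbool.h>

/* Placeholder value for illustration; not the kernel's definition. */
#define SKETCH_RW_LOCK_BIAS 0x01000000

typedef struct { atomic_int lock; } sketch_rwlock_t; /* starts at the bias */

static inline bool sketch_read_trylock(sketch_rwlock_t *rw)
{
	int old = atomic_load_explicit(&rw->lock, memory_order_relaxed);

	/* Readers may enter while the count is positive (no writer holds it). */
	while (old > 0) {
		if (atomic_compare_exchange_weak_explicit(&rw->lock, &old,
							  old - 1,
							  memory_order_acquire,
							  memory_order_relaxed))
			return true;
	}
	return false;
}

static inline bool sketch_write_trylock(sketch_rwlock_t *rw)
{
	int old = SKETCH_RW_LOCK_BIAS;

	/* A writer needs the full bias: no readers and no other writer. */
	return atomic_compare_exchange_strong_explicit(&rw->lock, &old, 0,
						       memory_order_acquire,
						       memory_order_relaxed);
}
```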