author     Martin Schwidefsky <schwidefsky@de.ibm.com>   2014-09-22 10:34:38 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>   2014-09-25 04:52:13 -0400
commit     bbae71bf9c2fe90dc5642d4cddbbc1994861fd92 (patch)
tree       80ce9213a904817502ffc588eb31d5731cc9a250 /arch/s390
parent     94232a4332de3bc210e7067fd43521b3eb12336a (diff)
s390/rwlock: use the interlocked-access facility 1 instructions
Make use of the load-and-add, load-and-or and load-and-and instructions
to atomically update the read-write lock without a compare-and-swap loop.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/spinlock.h   76
-rw-r--r--  arch/s390/lib/spinlock.c           34
2 files changed, 108 insertions, 2 deletions
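
To illustrate the idea behind the patch: a reader takes the lock with a single interlocked add and a writer with a single interlocked or, falling back to a wait loop only on contention, instead of retrying a compare-and-swap. The sketch below is a hypothetical user-space analogue, not part of the patch and not the kernel's API; the names (rwlock_t, read_lock, write_lock, the *_slow helpers) are made up for the example. It uses GCC's __atomic builtins, which a z196-aware compiler can emit as laa/lao/lan, and mirrors the lock layout used here: bit 31 marks a writer, the low 31 bits count readers.

/* Hypothetical user-space analogue of the patch's rwlock fast paths
 * (illustration only, not taken from the patch). */
#include <stdint.h>

typedef struct { uint32_t lock; } rwlock_t;   /* bit 31: writer, bits 0-30: reader count */

static void read_lock_slow(rwlock_t *rw)
{
        /* Undo the speculative increment, then retry once the writer is gone. */
        __atomic_fetch_sub(&rw->lock, 1, __ATOMIC_RELAXED);
        for (;;) {
                if ((int32_t) __atomic_load_n(&rw->lock, __ATOMIC_RELAXED) < 0)
                        continue;               /* writer still holds the lock */
                if ((int32_t) __atomic_fetch_add(&rw->lock, 1, __ATOMIC_ACQUIRE) >= 0)
                        return;                 /* read lock acquired */
                __atomic_fetch_sub(&rw->lock, 1, __ATOMIC_RELAXED);
        }
}

static void write_lock_slow(rwlock_t *rw, uint32_t prev)
{
        uint32_t old;

        for (;;) {
                old = __atomic_load_n(&rw->lock, __ATOMIC_RELAXED);
                if ((int32_t) old >= 0) {
                        /* writer bit is clear: try to set it with one interlocked or */
                        prev = __atomic_fetch_or(&rw->lock, 0x80000000u, __ATOMIC_ACQUIRE);
                        old = prev;
                }
                /* done once we set the bit ourselves and no readers remain */
                if ((old & 0x7fffffffu) == 0 && (int32_t) prev >= 0)
                        return;
        }
}

static inline void read_lock(rwlock_t *rw)
{
        /* load-and-add replaces the compare-and-swap retry loop */
        uint32_t old = __atomic_fetch_add(&rw->lock, 1, __ATOMIC_ACQUIRE);
        if ((int32_t) old < 0)                  /* a writer was holding the lock */
                read_lock_slow(rw);
}

static inline void read_unlock(rwlock_t *rw)
{
        __atomic_fetch_sub(&rw->lock, 1, __ATOMIC_RELEASE);
}

static inline void write_lock(rwlock_t *rw)
{
        /* load-and-or: set the writer bit in a single interlocked update */
        uint32_t old = __atomic_fetch_or(&rw->lock, 0x80000000u, __ATOMIC_ACQUIRE);
        if (old != 0)                           /* readers or another writer present */
                write_lock_slow(rw, old);
}

static inline void write_unlock(rwlock_t *rw)
{
        /* load-and-and: clear the writer bit */
        __atomic_fetch_and(&rw->lock, 0x7fffffffu, __ATOMIC_RELEASE);
}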
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index f9537b91ff21..d6bdf906caa5 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -130,8 +130,6 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
  */
 #define arch_write_can_lock(x) ((x)->lock == 0)
 
-extern void _raw_read_lock_wait(arch_rwlock_t *lp);
-extern void _raw_write_lock_wait(arch_rwlock_t *lp);
 extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
 extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
 
@@ -152,6 +150,78 @@ static inline int arch_write_trylock_once(arch_rwlock_t *rw)
                     _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
 }
 
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __RAW_OP_OR     "lao"
+#define __RAW_OP_AND    "lan"
+#define __RAW_OP_ADD    "laa"
+
+#define __RAW_LOCK(ptr, op_val, op_string)              \
+({                                                      \
+        unsigned int old_val;                           \
+                                                        \
+        typecheck(unsigned int *, ptr);                 \
+        asm volatile(                                   \
+                op_string " %0,%2,%1\n"                 \
+                "bcr 14,0\n"                            \
+                : "=d" (old_val), "+Q" (*ptr)           \
+                : "d" (op_val)                          \
+                : "cc", "memory");                      \
+        old_val;                                        \
+})
+
+#define __RAW_UNLOCK(ptr, op_val, op_string)            \
+({                                                      \
+        unsigned int old_val;                           \
+                                                        \
+        typecheck(unsigned int *, ptr);                 \
+        asm volatile(                                   \
+                "bcr 14,0\n"                            \
+                op_string " %0,%2,%1\n"                 \
+                : "=d" (old_val), "+Q" (*ptr)           \
+                : "d" (op_val)                          \
+                : "cc", "memory");                      \
+        old_val;                                        \
+})
+
+extern void _raw_read_lock_wait(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+        unsigned int old;
+
+        old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
+        if ((int) old < 0)
+                _raw_read_lock_wait(rw);
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+        __RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+        unsigned int old;
+
+        old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
+        if (old != 0)
+                _raw_write_lock_wait(rw, old);
+        rw->owner = SPINLOCK_LOCKVAL;
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+        rw->owner = 0;
+        __RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
+}
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+extern void _raw_read_lock_wait(arch_rwlock_t *lp);
+extern void _raw_write_lock_wait(arch_rwlock_t *lp);
+
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
         if (!arch_read_trylock_once(rw))
@@ -187,6 +257,8 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
                 : "cc", "memory");
 }
 
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
         if (!arch_read_trylock_once(rw))
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 01f29bb9c71b..034a35a3e9c1 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -114,6 +114,9 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
         unsigned int owner, old;
         int count = spin_retry;
 
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+        __RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
+#endif
         owner = 0;
         while (1) {
                 if (count-- <= 0) {
@@ -147,6 +150,35 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_read_trylock_retry);
 
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
+{
+        unsigned int owner, old;
+        int count = spin_retry;
+
+        owner = 0;
+        while (1) {
+                if (count-- <= 0) {
+                        if (owner && !smp_vcpu_scheduled(~owner))
+                                smp_yield_cpu(~owner);
+                        count = spin_retry;
+                }
+                old = ACCESS_ONCE(rw->lock);
+                owner = ACCESS_ONCE(rw->owner);
+                smp_rmb();
+                if ((int) old >= 0) {
+                        prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
+                        old = prev;
+                }
+                if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
+                        break;
+        }
+}
+EXPORT_SYMBOL(_raw_write_lock_wait);
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
 void _raw_write_lock_wait(arch_rwlock_t *rw)
 {
         unsigned int owner, old, prev;
@@ -173,6 +205,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
 }
 EXPORT_SYMBOL(_raw_write_lock_wait);
 
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
 int _raw_write_trylock_retry(arch_rwlock_t *rw)
 {
         unsigned int old;