author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2017-03-24 12:32:23 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2017-09-28 01:29:44 -0400
commit		eb3b7b848fb3dd00f7a57d633d4ae4d194aa7865 (patch)
tree		ab510aeb7e5aedc07d1cd0e60c5d430c74e02bd8 /arch/s390/lib
parent		b96f7d881ad94203e997cd2aa7112d4a06d121ef (diff)
s390/rwlock: introduce rwlock wait queueing
Like the common queued rwlock code the s390 implementation uses the queued spinlock code on a spinlock_t embedded in the rwlock_t to achieve the queueing. The encoding of the rwlock_t differs though: the counter field in the rwlock_t is split into two parts. The upper two bytes hold the write bit and the write wait counter, the lower two bytes hold the read counter. The arch_read_lock operation works exactly like the common qrwlock, but the enqueue operation for a writer follows different logic. After the inline attempt to get the rwlock for write has failed, the writer first increases the write wait counter, acquires the wait spin_lock for the queueing, and then loops until there are no readers and the write bit is zero. Without the write wait counter a CPU that just released the rwlock could immediately reacquire the lock in the inline code, bypassing all outstanding read and write waiters. For s390 this would cause massive imbalances in favour of writers in case of a contended rwlock.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
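The counter layout the message describes can be made concrete with a small stand-alone sketch. This is not the kernel code: the macro names and the helper below are invented for illustration only, while the functions in spinlock.c use the literal constants 0x10000, 0x20000 and 0x1ffff directly.

/* Illustrative sketch of the rw->cnts encoding described above (not kernel code). */
#include <stdio.h>

#define RW_READER_MASK	0x0000ffff	/* lower two bytes: read counter            */
#define RW_WRITE_BIT	0x00010000	/* bit 16: lock currently held for write    */
#define RW_WAITER_UNIT	0x00020000	/* upper bits: write wait counter increment */

/* A writer may take the lock only when no reader holds it and the
 * write bit is clear, i.e. the lower 17 bits (0x1ffff) are all zero. */
static int write_may_acquire(int cnts)
{
	return (cnts & (RW_READER_MASK | RW_WRITE_BIT)) == 0;
}

int main(void)
{
	int cnts = 0;

	cnts += 3;			/* three readers hold the lock */
	cnts += RW_WAITER_UNIT;		/* one writer queued itself    */
	printf("readers=%d waiting writers=%d writer may acquire=%d\n",
	       cnts & RW_READER_MASK, cnts >> 17, write_may_acquire(cnts));

	cnts -= 3;			/* all readers released the lock */
	printf("writer may acquire=%d\n", write_may_acquire(cnts));
	return 0;
}

Because a waiting writer is counted in the upper bytes, a CPU that just dropped the lock sees a non-zero counter on its inline fast path and has to queue behind the existing waiters, which is the behaviour the message above argues for.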
Diffstat (limited to 'arch/s390/lib')
-rw-r--r--	arch/s390/lib/spinlock.c	| 138
1 file changed, 29 insertions, 109 deletions
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 6747134227cd..43b0d46c3786 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -268,129 +268,49 @@ int arch_spin_trylock_retry(arch_spinlock_t *lp)
 }
 EXPORT_SYMBOL(arch_spin_trylock_retry);
 
-void _raw_read_lock_wait(arch_rwlock_t *rw)
+void arch_read_lock_wait(arch_rwlock_t *rw)
 {
-	int count = spin_retry;
-	int owner, old;
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
-#endif
-	owner = 0;
-	while (1) {
-		if (count-- <= 0) {
-			if (owner && arch_vcpu_is_preempted(owner - 1))
-				smp_yield_cpu(owner - 1);
-			count = spin_retry;
-		}
-		old = ACCESS_ONCE(rw->lock);
-		owner = ACCESS_ONCE(rw->owner);
-		if (old < 0)
-			continue;
-		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
-			return;
+	if (unlikely(in_interrupt())) {
+		while (READ_ONCE(rw->cnts) & 0x10000)
+			barrier();
+		return;
 	}
-}
-EXPORT_SYMBOL(_raw_read_lock_wait);
-
-int _raw_read_trylock_retry(arch_rwlock_t *rw)
-{
-	int count = spin_retry;
-	int old;
 
-	while (count-- > 0) {
-		old = ACCESS_ONCE(rw->lock);
-		if (old < 0)
-			continue;
-		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
-			return 1;
-	}
-	return 0;
+	/* Remove this reader again to allow recursive read locking */
+	__atomic_add_const(-1, &rw->cnts);
+	/* Put the reader into the wait queue */
+	arch_spin_lock(&rw->wait);
+	/* Now add this reader to the count value again */
+	__atomic_add_const(1, &rw->cnts);
+	/* Loop until the writer is done */
+	while (READ_ONCE(rw->cnts) & 0x10000)
+		barrier();
+	arch_spin_unlock(&rw->wait);
 }
-EXPORT_SYMBOL(_raw_read_trylock_retry);
+EXPORT_SYMBOL(arch_read_lock_wait);
 
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
+void arch_write_lock_wait(arch_rwlock_t *rw)
 {
-	int count = spin_retry;
-	int owner, old;
-
-	owner = 0;
-	while (1) {
-		if (count-- <= 0) {
-			if (owner && arch_vcpu_is_preempted(owner - 1))
-				smp_yield_cpu(owner - 1);
-			count = spin_retry;
-		}
-		old = ACCESS_ONCE(rw->lock);
-		owner = ACCESS_ONCE(rw->owner);
-		smp_mb();
-		if (old >= 0) {
-			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
-			old = prev;
-		}
-		if ((old & 0x7fffffff) == 0 && prev >= 0)
-			break;
-	}
-}
-EXPORT_SYMBOL(_raw_write_lock_wait);
+	int old;
 
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+	/* Add this CPU to the write waiters */
+	__atomic_add(0x20000, &rw->cnts);
 
-void _raw_write_lock_wait(arch_rwlock_t *rw)
-{
-	int count = spin_retry;
-	int owner, old, prev;
+	/* Put the writer into the wait queue */
+	arch_spin_lock(&rw->wait);
 
-	prev = 0x80000000;
-	owner = 0;
 	while (1) {
-		if (count-- <= 0) {
-			if (owner && arch_vcpu_is_preempted(owner - 1))
-				smp_yield_cpu(owner - 1);
-			count = spin_retry;
-		}
-		old = ACCESS_ONCE(rw->lock);
-		owner = ACCESS_ONCE(rw->owner);
-		if (old >= 0 &&
-		    __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
-			prev = old;
-		else
-			smp_mb();
-		if ((old & 0x7fffffff) == 0 && prev >= 0)
+		old = READ_ONCE(rw->cnts);
+		if ((old & 0x1ffff) == 0 &&
+		    __atomic_cmpxchg_bool(&rw->cnts, old, old | 0x10000))
+			/* Got the lock */
 			break;
+		barrier();
 	}
-}
-EXPORT_SYMBOL(_raw_write_lock_wait);
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-int _raw_write_trylock_retry(arch_rwlock_t *rw)
-{
-	int count = spin_retry;
-	int old;
-
-	while (count-- > 0) {
-		old = ACCESS_ONCE(rw->lock);
-		if (old)
-			continue;
-		if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
-			return 1;
-	}
-	return 0;
-}
-EXPORT_SYMBOL(_raw_write_trylock_retry);
 
-void arch_lock_relax(int cpu)
-{
-	if (!cpu)
-		return;
-	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(cpu - 1))
-		return;
-	smp_yield_cpu(cpu - 1);
-}
-EXPORT_SYMBOL(arch_lock_relax);
+	arch_spin_unlock(&rw->wait);
 }
+EXPORT_SYMBOL(arch_write_lock_wait);
 
 void arch_spin_relax(arch_spinlock_t *lp)
 {