Diffstat (limited to 'kernel/locking/rwsem-xadd.c')
-rw-r--r--	kernel/locking/rwsem-xadd.c | 204
1 file changed, 105 insertions(+), 99 deletions(-)
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index fbe96341beee..6b3ee9948bf1 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -147,6 +147,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
 			 * will notice the queued writer.
 			 */
 			wake_q_add(wake_q, waiter->task);
+			lockevent_inc(rwsem_wake_writer);
 		}
 
 		return;
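The lockevent_inc() added here (and the lockevent_cond_inc() used further down) come from the lock event counting code in kernel/locking/lock_events.h, which this diff only calls into. As a rough sketch of the pattern (the enum and macro internals below are assumptions modelled on that infrastructure, not definitions taken from this patch), each event is a per-CPU counter bumped by a macro that compiles away entirely when CONFIG_LOCK_EVENT_COUNTS is off:

/* Sketch only -- see kernel/locking/lock_events.h for the real definitions. */
#ifdef CONFIG_LOCK_EVENT_COUNTS
DECLARE_PER_CPU(unsigned long, lockevents[lockevent_num]);

#define lockevent_inc(ev)	this_cpu_inc(lockevents[LOCKEVENT_ ## ev])
#define lockevent_cond_inc(ev, c)		\
	do {					\
		if (c)				\
			lockevent_inc(ev);	\
	} while (0)
#else	/* !CONFIG_LOCK_EVENT_COUNTS: the calls compile to nothing */
#define lockevent_inc(ev)
#define lockevent_cond_inc(ev, c)
#endif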
@@ -176,9 +177,8 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
 			goto try_reader_grant;
 		}
 		/*
-		 * It is not really necessary to set it to reader-owned here,
-		 * but it gives the spinners an early indication that the
-		 * readers now have the lock.
+		 * Set it to reader-owned to give spinners an early
+		 * indication that readers now have the lock.
 		 */
 		__rwsem_set_reader_owned(sem, waiter->task);
 	}
@@ -215,6 +215,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
 	}
 
 	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
+	lockevent_cond_inc(rwsem_wake_reader, woken);
 	if (list_empty(&sem->wait_list)) {
 		/* hit end of list above */
 		adjustment -= RWSEM_WAITING_BIAS;
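To make the adjustment arithmetic in this hunk concrete, here is a worked example using the usual 64-bit bias values defined elsewhere in the rwsem code; the specific constants are assumptions pulled from that header, not part of this hunk:

/*
 * Assumed 64-bit values: RWSEM_ACTIVE_READ_BIAS == 1,
 * RWSEM_WAITING_BIAS == -0x100000000.
 *
 * Say three readers are woken and the waker has not pre-charged a
 * reader bias, so adjustment is still 0 at this point:
 *
 *	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment
 *	           = 3 * 1 - 0 = 3
 *
 * If the wait list is now empty, the waiting bias is dropped as well:
 *
 *	adjustment -= RWSEM_WAITING_BIAS;	(adjustment becomes 3 + 0x100000000)
 *
 * so a single atomic update of sem->count both records three active
 * readers and clears the "waiters queued" indication.
 */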
@@ -225,92 +226,6 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
 }
 
 /*
- * Wait for the read lock to be granted
- */
-static inline struct rw_semaphore __sched *
-__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
-{
-	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
-	struct rwsem_waiter waiter;
-	DEFINE_WAKE_Q(wake_q);
-
-	waiter.task = current;
-	waiter.type = RWSEM_WAITING_FOR_READ;
-
-	raw_spin_lock_irq(&sem->wait_lock);
-	if (list_empty(&sem->wait_list)) {
-		/*
-		 * In case the wait queue is empty and the lock isn't owned
-		 * by a writer, this reader can exit the slowpath and return
-		 * immediately as its RWSEM_ACTIVE_READ_BIAS has already
-		 * been set in the count.
-		 */
-		if (atomic_long_read(&sem->count) >= 0) {
-			raw_spin_unlock_irq(&sem->wait_lock);
-			return sem;
-		}
-		adjustment += RWSEM_WAITING_BIAS;
-	}
-	list_add_tail(&waiter.list, &sem->wait_list);
-
-	/* we're now waiting on the lock, but no longer actively locking */
-	count = atomic_long_add_return(adjustment, &sem->count);
-
-	/*
-	 * If there are no active locks, wake the front queued process(es).
-	 *
-	 * If there are no writers and we are first in the queue,
-	 * wake our own waiter to join the existing active readers !
-	 */
-	if (count == RWSEM_WAITING_BIAS ||
-	    (count > RWSEM_WAITING_BIAS &&
-	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
-		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
-
-	raw_spin_unlock_irq(&sem->wait_lock);
-	wake_up_q(&wake_q);
-
-	/* wait to be given the lock */
-	while (true) {
-		set_current_state(state);
-		if (!waiter.task)
-			break;
-		if (signal_pending_state(state, current)) {
-			raw_spin_lock_irq(&sem->wait_lock);
-			if (waiter.task)
-				goto out_nolock;
-			raw_spin_unlock_irq(&sem->wait_lock);
-			break;
-		}
-		schedule();
-	}
-
-	__set_current_state(TASK_RUNNING);
-	return sem;
-out_nolock:
-	list_del(&waiter.list);
-	if (list_empty(&sem->wait_list))
-		atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
-	raw_spin_unlock_irq(&sem->wait_lock);
-	__set_current_state(TASK_RUNNING);
-	return ERR_PTR(-EINTR);
-}
-
-__visible struct rw_semaphore * __sched
-rwsem_down_read_failed(struct rw_semaphore *sem)
-{
-	return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE);
-}
-EXPORT_SYMBOL(rwsem_down_read_failed);
-
-__visible struct rw_semaphore * __sched
-rwsem_down_read_failed_killable(struct rw_semaphore *sem)
-{
-	return __rwsem_down_read_failed_common(sem, TASK_KILLABLE);
-}
-EXPORT_SYMBOL(rwsem_down_read_failed_killable);
-
-/*
  * This function must be called with the sem->wait_lock held to prevent
  * race conditions between checking the rwsem wait list and setting the
  * sem->count accordingly.
@@ -346,21 +261,17 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
  */
 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 {
-	long old, count = atomic_long_read(&sem->count);
-
-	while (true) {
-		if (!(count == 0 || count == RWSEM_WAITING_BIAS))
-			return false;
+	long count = atomic_long_read(&sem->count);
 
-		old = atomic_long_cmpxchg_acquire(&sem->count, count,
-				  count + RWSEM_ACTIVE_WRITE_BIAS);
-		if (old == count) {
+	while (!count || count == RWSEM_WAITING_BIAS) {
+		if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
+					count + RWSEM_ACTIVE_WRITE_BIAS)) {
 			rwsem_set_owner(sem);
+			lockevent_inc(rwsem_opt_wlock);
 			return true;
 		}
-
-		count = old;
 	}
+	return false;
 }
 
 static inline bool owner_on_cpu(struct task_struct *owner)
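This hunk replaces the open-coded cmpxchg loop with atomic_long_try_cmpxchg_acquire(), which returns a bool and, on failure, stores the value it actually observed back through its second argument, so the manual "count = old" reload disappears. For readers less familiar with the idiom, a user-space analogue of the same pattern using standard C11 atomics (illustration only, not code from this file, and simplified to a single "claim while zero" condition):

#include <stdatomic.h>
#include <stdbool.h>

/*
 * Claim 'v' by adding 'bias' while it reads as 0.  On failure the CAS
 * writes the value it actually saw back into 'expected', so there is
 * no separate reload step; a spurious weak-CAS failure simply retries.
 */
static bool try_claim(atomic_long *v, long bias)
{
	long expected = 0;

	while (expected == 0) {
		if (atomic_compare_exchange_weak_explicit(v, &expected, bias,
							  memory_order_acquire,
							  memory_order_relaxed))
			return true;
	}
	return false;	/* someone else got there first */
}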
@@ -481,6 +392,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 	osq_unlock(&sem->osq);
 done:
 	preempt_enable();
+	lockevent_cond_inc(rwsem_opt_fail, !taken);
 	return taken;
 }
 
@@ -505,6 +417,97 @@ static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
 #endif
 
 /*
+ * Wait for the read lock to be granted
+ */
+static inline struct rw_semaphore __sched *
+__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
+{
+	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
+	struct rwsem_waiter waiter;
+	DEFINE_WAKE_Q(wake_q);
+
+	waiter.task = current;
+	waiter.type = RWSEM_WAITING_FOR_READ;
+
+	raw_spin_lock_irq(&sem->wait_lock);
+	if (list_empty(&sem->wait_list)) {
+		/*
+		 * In case the wait queue is empty and the lock isn't owned
+		 * by a writer, this reader can exit the slowpath and return
+		 * immediately as its RWSEM_ACTIVE_READ_BIAS has already
+		 * been set in the count.
+		 */
+		if (atomic_long_read(&sem->count) >= 0) {
+			raw_spin_unlock_irq(&sem->wait_lock);
+			rwsem_set_reader_owned(sem);
+			lockevent_inc(rwsem_rlock_fast);
+			return sem;
+		}
+		adjustment += RWSEM_WAITING_BIAS;
+	}
+	list_add_tail(&waiter.list, &sem->wait_list);
+
+	/* we're now waiting on the lock, but no longer actively locking */
+	count = atomic_long_add_return(adjustment, &sem->count);
+
+	/*
+	 * If there are no active locks, wake the front queued process(es).
+	 *
+	 * If there are no writers and we are first in the queue,
+	 * wake our own waiter to join the existing active readers !
+	 */
+	if (count == RWSEM_WAITING_BIAS ||
+	    (count > RWSEM_WAITING_BIAS &&
+	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
+		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
+
+	raw_spin_unlock_irq(&sem->wait_lock);
+	wake_up_q(&wake_q);
+
+	/* wait to be given the lock */
+	while (true) {
+		set_current_state(state);
+		if (!waiter.task)
+			break;
+		if (signal_pending_state(state, current)) {
+			raw_spin_lock_irq(&sem->wait_lock);
+			if (waiter.task)
+				goto out_nolock;
+			raw_spin_unlock_irq(&sem->wait_lock);
+			break;
+		}
+		schedule();
+		lockevent_inc(rwsem_sleep_reader);
+	}
+
+	__set_current_state(TASK_RUNNING);
+	lockevent_inc(rwsem_rlock);
+	return sem;
+out_nolock:
+	list_del(&waiter.list);
+	if (list_empty(&sem->wait_list))
+		atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
+	raw_spin_unlock_irq(&sem->wait_lock);
+	__set_current_state(TASK_RUNNING);
+	lockevent_inc(rwsem_rlock_fail);
+	return ERR_PTR(-EINTR);
+}
+
+__visible struct rw_semaphore * __sched
+rwsem_down_read_failed(struct rw_semaphore *sem)
+{
+	return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(rwsem_down_read_failed);
+
+__visible struct rw_semaphore * __sched
+rwsem_down_read_failed_killable(struct rw_semaphore *sem)
+{
+	return __rwsem_down_read_failed_common(sem, TASK_KILLABLE);
+}
+EXPORT_SYMBOL(rwsem_down_read_failed_killable);
+
+/*
  * Wait until we successfully acquire the write lock
  */
 static inline struct rw_semaphore *
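The relocated read slowpath leans on the wake_q machinery (DEFINE_WAKE_Q(), wake_q_add(), wake_up_q()): wakeups are only queued while sem->wait_lock is held and are actually issued after the lock has been dropped, which keeps the wait_lock hold time short. A minimal sketch of that pattern in isolation (wake_one_deferred() is a hypothetical helper, not code from this file):

#include <linux/sched.h>
#include <linux/sched/wake_q.h>
#include <linux/spinlock.h>

/* Hypothetical helper: wake one known waiter without calling into the
 * scheduler while the spinlock is still held. */
static void wake_one_deferred(spinlock_t *lock, struct task_struct *waiter)
{
	DEFINE_WAKE_Q(wake_q);

	spin_lock(lock);
	wake_q_add(&wake_q, waiter);	/* record the wakeup under the lock */
	spin_unlock(lock);
	wake_up_q(&wake_q);		/* perform it with the lock dropped */
}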
@@ -580,6 +583,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
 			goto out_nolock;
 
 		schedule();
+		lockevent_inc(rwsem_sleep_writer);
 		set_current_state(state);
 	} while ((count = atomic_long_read(&sem->count)) & RWSEM_ACTIVE_MASK);
 
@@ -588,6 +592,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
 	__set_current_state(TASK_RUNNING);
 	list_del(&waiter.list);
 	raw_spin_unlock_irq(&sem->wait_lock);
+	lockevent_inc(rwsem_wlock);
 
 	return ret;
 
@@ -601,6 +606,7 @@ out_nolock:
 		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
 	raw_spin_unlock_irq(&sem->wait_lock);
 	wake_up_q(&wake_q);
+	lockevent_inc(rwsem_wlock_fail);
 
 	return ERR_PTR(-EINTR);
 }