path: root/lib/rwsem.c
author     Michel Lespinasse <walken@google.com>  2013-05-07 09:45:54 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-05-07 10:20:16 -0400
commit     ed00f64346631dff035adfb9b0240daaa8b46c4e (patch)
tree       61afae642c8a3333587bf73a8dc19849416e243d /lib/rwsem.c
parent     023fe4f712028d25b42d31984abae1f3d3f0e3e2 (diff)
rwsem: more aggressive lock stealing in rwsem_down_write_failed
Some small code simplifications can be achieved by doing more aggressive
lock stealing:

- When rwsem_down_write_failed() notices that there are no active locks
  (and thus no thread to wake us if we decided to sleep), it used to wake
  the first queued process. However, stealing the lock is also sufficient
  to deal with this case, so we don't need this check anymore.

- In try_get_writer_sem(), we can steal the lock even when the first
  waiter is a reader. This is correct because the code path that wakes
  readers is protected by the wait_lock.

As to the performance effects of this change, they are expected to be
minimal: readers are still granted the lock (rather than having to
acquire it themselves) when they reach the front of the wait queue, so
we have essentially the same behavior as in rwsem-spinlock.

Signed-off-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Peter Hurley <peter@hurleysoftware.com>
Acked-by: Davidlohr Bueso <davidlohr.bueso@hp.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
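[Editor's illustration] Below is a minimal userspace sketch of the count-based
write-lock stealing that try_get_writer_sem() performs after this patch. It is
not the kernel code: ACTIVE_BIAS, ACTIVE_MASK, WAITING_BIAS, try_steal_write()
and the main() harness are hypothetical stand-ins modeled on the 32-bit RWSEM_*
constants and rwsem_atomic_update() in lib/rwsem.c, using C11 <stdatomic.h> in
place of the kernel's atomic primitives.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy versions of the rwsem count layout (hypothetical names). */
#define ACTIVE_BIAS        1L              /* one active reader or writer   */
#define ACTIVE_MASK        0xffffL         /* low bits: # of active holders */
#define WAITING_BIAS       (-0x10000L)     /* set while waiters are queued  */
#define ACTIVE_WRITE_BIAS  (WAITING_BIAS + ACTIVE_BIAS)

/* Post-patch stealing logic: unconditionally try to take the write lock,
 * regardless of what kind of waiter is at the front of the queue.
 * 'only_waiter' plays the role of list_is_singular(). */
static bool try_steal_write(atomic_long *count, bool only_waiter)
{
	long adjustment = ACTIVE_WRITE_BIAS;

	/* We are the only queued waiter, so our departure from the queue
	 * also cancels the waiting bias our enqueue added. */
	if (only_waiter)
		adjustment -= WAITING_BIAS;

	for (;;) {
		/* xadd, like rwsem_atomic_update(); fetch_add returns the
		 * value the count had *before* our update. */
		long old = atomic_fetch_add(count, adjustment);

		if (!(old & ACTIVE_MASK))
			return true;          /* no active holders: stolen */

		/* Lock was busy: back our bias out again. */
		long now = atomic_fetch_add(count, -adjustment) - adjustment;
		if (now & ACTIVE_MASK)
			return false;         /* still busy, go back to sleep */
		/* all holders drained between the two updates: retry */
	}
}

int main(void)
{
	atomic_long count;

	/* One queued waiter (us), lock currently free: steal succeeds. */
	atomic_init(&count, WAITING_BIAS);
	printf("free lock:      %s\n",
	       try_steal_write(&count, true) ? "stolen" : "blocked");

	/* An active reader ahead of us: steal fails, we would sleep. */
	atomic_store(&count, WAITING_BIAS + ACTIVE_BIAS);
	printf("held by reader: %s\n",
	       try_steal_write(&count, false) ? "stolen" : "blocked");
	return 0;
}

The undo path mirrors the retry loop behind the try_again_write: label in
lib/rwsem.c: if holders are still active after the bias is backed out, the
thread gives up and sleeps; if they drained in between, the steal is retried.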
Diffstat (limited to 'lib/rwsem.c')
-rw-r--r--  lib/rwsem.c  29
1 file changed, 8 insertions(+), 21 deletions(-)
diff --git a/lib/rwsem.c b/lib/rwsem.c
index c73bd96dc30c..2360bf204098 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -143,20 +143,12 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
 }
 
 /* Try to get write sem, caller holds sem->wait_lock: */
-static int try_get_writer_sem(struct rw_semaphore *sem,
-					struct rwsem_waiter *waiter)
+static int try_get_writer_sem(struct rw_semaphore *sem)
 {
-	struct rwsem_waiter *fwaiter;
 	long oldcount, adjustment;
 
-	/* only steal when first waiter is writing */
-	fwaiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
-	if (fwaiter->type != RWSEM_WAITING_FOR_WRITE)
-		return 0;
-
 	adjustment = RWSEM_ACTIVE_WRITE_BIAS;
-	/* Only one waiter in the queue: */
-	if (fwaiter == waiter && waiter->list.next == &sem->wait_list)
+	if (list_is_singular(&sem->wait_list))
 		adjustment -= RWSEM_WAITING_BIAS;
 
 try_again_write:
@@ -233,23 +225,18 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 	/* we're now waiting on the lock, but no longer actively locking */
 	count = rwsem_atomic_update(adjustment, sem);
 
-	/* If there are no active locks, wake the front queued process(es) up.
-	 *
-	 * Alternatively, if we're called from a failed down_write(), there
-	 * were already threads queued before us and there are no active
-	 * writers, the lock must be read owned; so we try to wake any read
-	 * locks that were queued ahead of us. */
-	if (count == RWSEM_WAITING_BIAS)
-		sem = __rwsem_do_wake(sem, RWSEM_WAKE_NO_ACTIVE);
-	else if (count > RWSEM_WAITING_BIAS &&
-		 adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
+	/* If there were already threads queued before us and there are no
+	 * active writers, the lock must be read owned; so we try to wake
+	 * any read locks that were queued ahead of us. */
+	if (count > RWSEM_WAITING_BIAS &&
+	    adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
 		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
 
 	/* wait until we successfully acquire the lock */
 	while (true) {
 		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 
-		if (try_get_writer_sem(sem, &waiter))
+		if (try_get_writer_sem(sem))
 			break;
 
 		raw_spin_unlock_irq(&sem->wait_lock);