 lib/rwsem.c | 29 ++++++++---------------------
 1 file changed, 8 insertions(+), 21 deletions(-)
diff --git a/lib/rwsem.c b/lib/rwsem.c
index c73bd96dc30c..2360bf204098 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -143,20 +143,12 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
 }
 
 /* Try to get write sem, caller holds sem->wait_lock: */
-static int try_get_writer_sem(struct rw_semaphore *sem,
-                              struct rwsem_waiter *waiter)
+static int try_get_writer_sem(struct rw_semaphore *sem)
 {
-        struct rwsem_waiter *fwaiter;
         long oldcount, adjustment;
 
-        /* only steal when first waiter is writing */
-        fwaiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
-        if (fwaiter->type != RWSEM_WAITING_FOR_WRITE)
-                return 0;
-
         adjustment = RWSEM_ACTIVE_WRITE_BIAS;
-        /* Only one waiter in the queue: */
-        if (fwaiter == waiter && waiter->list.next == &sem->wait_list)
+        if (list_is_singular(&sem->wait_list))
                 adjustment -= RWSEM_WAITING_BIAS;
 
 try_again_write:
@@ -233,23 +225,18 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
         /* we're now waiting on the lock, but no longer actively locking */
         count = rwsem_atomic_update(adjustment, sem);
 
-        /* If there are no active locks, wake the front queued process(es) up.
-         *
-         * Alternatively, if we're called from a failed down_write(), there
-         * were already threads queued before us and there are no active
-         * writers, the lock must be read owned; so we try to wake any read
-         * locks that were queued ahead of us. */
-        if (count == RWSEM_WAITING_BIAS)
-                sem = __rwsem_do_wake(sem, RWSEM_WAKE_NO_ACTIVE);
-        else if (count > RWSEM_WAITING_BIAS &&
-                        adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
+        /* If there were already threads queued before us and there are no
+         * active writers, the lock must be read owned; so we try to wake
+         * any read locks that were queued ahead of us. */
+        if (count > RWSEM_WAITING_BIAS &&
+            adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
                 sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
 
         /* wait until we successfully acquire the lock */
         while (true) {
                 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 
-                if (try_get_writer_sem(sem, &waiter))
+                if (try_get_writer_sem(sem))
                         break;
 
                 raw_spin_unlock_irq(&sem->wait_lock);
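
For reference on the first hunk: the open-coded test "fwaiter == waiter && waiter->list.next == &sem->wait_list" is true exactly when the caller is the only entry on sem->wait_list, which is what list_is_singular() from include/linux/list.h checks. A standalone userspace sketch of that helper and its behavior (an illustration mirroring the kernel's list.h, not the kernel code itself):

/* Sketch of the list_is_singular() test that replaces the open-coded
 * "only waiter in the queue" check in the first hunk. */
#include <stdio.h>

struct list_head {
        struct list_head *next, *prev;
};

static inline int list_empty(const struct list_head *head)
{
        return head->next == head;
}

/* True iff the list has exactly one entry; an empty list also has
 * next == prev (both point at head), hence the !list_empty() guard. */
static inline int list_is_singular(const struct list_head *head)
{
        return !list_empty(head) && (head->next == head->prev);
}

int main(void)
{
        struct list_head wait_list = { &wait_list, &wait_list };
        struct list_head waiter;

        printf("empty:    %d\n", list_is_singular(&wait_list));  /* 0 */

        /* queue a single waiter, as list_add_tail() would */
        waiter.next = &wait_list;
        waiter.prev = &wait_list;
        wait_list.next = &waiter;
        wait_list.prev = &waiter;

        printf("one item: %d\n", list_is_singular(&wait_list));  /* 1 */
        return 0;
}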
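
On the second hunk: with waiters already queued before us (adjustment == -RWSEM_ACTIVE_WRITE_BIAS), count can only sit above RWSEM_WAITING_BIAS if active readers hold the lock, because an active writer contributes RWSEM_ACTIVE_WRITE_BIAS, which is itself below zero by a whole waiting bias. A minimal userspace sketch of that arithmetic, assuming the 64-bit bias constants from the kernel's generic rwsem definitions (values differ on 32-bit):

/* Count arithmetic behind the "count > RWSEM_WAITING_BIAS" test. */
#include <stdio.h>

#define RWSEM_ACTIVE_MASK       0xffffffffL
#define RWSEM_ACTIVE_BIAS       0x00000001L
#define RWSEM_WAITING_BIAS      (-RWSEM_ACTIVE_MASK - 1)
#define RWSEM_ACTIVE_READ_BIAS  RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
        /* One waiting bias is held whenever the wait list is non-empty. */
        long read_owned  = RWSEM_WAITING_BIAS + 2 * RWSEM_ACTIVE_READ_BIAS;
        long write_owned = RWSEM_WAITING_BIAS + RWSEM_ACTIVE_WRITE_BIAS;

        /* Read-owned: the readers' positive biases lift count above
         * RWSEM_WAITING_BIAS, so queued readers ahead of us get woken. */
        printf("read-owned:  wake readers = %d\n",
               read_owned > RWSEM_WAITING_BIAS);   /* 1 */

        /* Write-owned: the writer's bias drags count a whole waiting
         * bias lower, so the condition stays false and nobody is woken. */
        printf("write-owned: wake readers = %d\n",
               write_owned > RWSEM_WAITING_BIAS);  /* 0 */
        return 0;
}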