Diffstat (limited to 'lib/rwsem.c')
-rw-r--r--  lib/rwsem.c  19
1 files changed, 15 insertions, 4 deletions
diff --git a/lib/rwsem.c b/lib/rwsem.c
index a3e68bf5932e..318d435dcebb 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -67,6 +67,9 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
 		goto readers_only;
 
 	if (wake_type == RWSEM_WAKE_READ_OWNED)
+		/* Another active reader was observed, so wakeup is not
+		 * likely to succeed. Save the atomic op.
+		 */
 		goto out;
 
 	/* There's a writer at the front of the queue - try to grant it the
@@ -111,8 +114,8 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
 	 * count adjustment pretty soon.
 	 */
 	if (wake_type == RWSEM_WAKE_ANY &&
-	    (rwsem_atomic_update(0, sem) & RWSEM_ACTIVE_MASK))
-		/* Someone grabbed the sem already */
+	    rwsem_atomic_update(0, sem) < RWSEM_WAITING_BIAS)
+		/* Someone grabbed the sem for write already */
 		goto out;
 
 	/* Grant an infinite number of read locks to the readers at the front
@@ -187,9 +190,17 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 	/* we're now waiting on the lock, but no longer actively locking */
 	count = rwsem_atomic_update(adjustment, sem);
 
-	/* if there are no active locks, wake the front queued process(es) up */
-	if (!(count & RWSEM_ACTIVE_MASK))
+	/* If there are no active locks, wake the front queued process(es) up.
+	 *
+	 * Alternatively, if we're called from a failed down_write(), there
+	 * were already threads queued before us and there are no active
+	 * writers, the lock must be read owned; so we try to wake any read
+	 * locks that were queued ahead of us. */
+	if (count == RWSEM_WAITING_BIAS)
 		sem = __rwsem_do_wake(sem, RWSEM_WAKE_NO_ACTIVE);
+	else if (count > RWSEM_WAITING_BIAS &&
+		 adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
+		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
 
 	spin_unlock_irq(&sem->wait_lock);
 
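
For readers following the new checks, here is a minimal userspace sketch of the count arithmetic they rely on. It assumes the generic layout of the rwsem count word (RWSEM_ACTIVE_BIAS = 1, RWSEM_WAITING_BIAS = -0x00010000, RWSEM_ACTIVE_WRITE_BIAS = RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS); the exact widths vary by architecture, and the classify() helper is purely illustrative, not kernel code. Under that layout, count == RWSEM_WAITING_BIAS means waiters are queued but nobody holds the lock, count > RWSEM_WAITING_BIAS (with waiters queued) means only readers hold it, and count < RWSEM_WAITING_BIAS means an active writer is present, which is the distinction the RWSEM_WAKE_NO_ACTIVE and RWSEM_WAKE_READ_OWNED paths above draw.

#include <stdio.h>

/*
 * Standalone sketch of the rwsem count arithmetic, for illustration only.
 * The constant values follow the generic 32-bit layout; treat them as
 * assumptions rather than the authoritative per-arch definitions.
 */
#define RWSEM_ACTIVE_BIAS	0x00000001L
#define RWSEM_WAITING_BIAS	(-0x00010000L)
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

/* Hypothetical helper: reports what the count value implies. */
static void classify(long count)
{
	if (count == RWSEM_WAITING_BIAS)
		printf("%ld: waiters queued, no active lockers -> RWSEM_WAKE_NO_ACTIVE\n",
		       count);
	else if (count > RWSEM_WAITING_BIAS)
		printf("%ld: active readers remain -> lock is read owned\n",
		       count);
	else
		printf("%ld: an active writer holds the lock -> nothing to wake\n",
		       count);
}

int main(void)
{
	/* One reader still holds the lock while waiters (including a writer
	 * that just failed down_write() and backed out its bias, i.e.
	 * adjustment == -RWSEM_ACTIVE_WRITE_BIAS) are queued. */
	classify(RWSEM_ACTIVE_READ_BIAS + RWSEM_WAITING_BIAS);

	/* No active lockers, waiters queued: wake the front of the queue. */
	classify(RWSEM_WAITING_BIAS);

	/* An active writer holds the lock while waiters are queued:
	 * count drops below RWSEM_WAITING_BIAS. */
	classify(RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS);
	return 0;
}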