Diffstat (limited to 'kernel/locking/rwsem-xadd.c')
 kernel/locking/rwsem-xadd.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 3064c50e181e..09b180063ee1 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -180,7 +180,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
 		 * but it gives the spinners an early indication that the
 		 * readers now have the lock.
 		 */
-		rwsem_set_reader_owned(sem);
+		__rwsem_set_reader_owned(sem, waiter->task);
 	}
 
 	/*
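
The hunk above changes the wakeup path to record which task now owns the lock for reading, rather than setting a bare reader-owned marker, so optimistic spinners get both the "readers hold it" hint and the last reader's identity. A minimal user-space sketch of the underlying pointer-tagging idea follows, assuming a free low bit in task pointers as the kernel's owner field relies on; the demo_* names and DEMO_READER_OWNED bit are illustrative stand-ins, not the kernel's exact definitions.

#include <stdio.h>
#include <stdint.h>

#define DEMO_READER_OWNED	1UL

struct demo_task { const char *comm; };

/* Stands in for sem->owner. */
static uintptr_t demo_owner;

static void demo_set_reader_owned(struct demo_task *task)
{
	/* Store the reader's task pointer with the reader-owned bit set. */
	demo_owner = (uintptr_t)task | DEMO_READER_OWNED;
}

int main(void)
{
	struct demo_task t = { "reader" };

	demo_set_reader_owned(&t);

	/* An optimistic spinner can now see that readers hold the lock. */
	if (demo_owner & DEMO_READER_OWNED) {
		struct demo_task *owner =
			(struct demo_task *)(demo_owner & ~DEMO_READER_OWNED);
		printf("reader-owned, last reader: %s\n", owner->comm);
	}
	return 0;
}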
@@ -233,8 +233,19 @@ __rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
 	waiter.type = RWSEM_WAITING_FOR_READ;
 
 	raw_spin_lock_irq(&sem->wait_lock);
-	if (list_empty(&sem->wait_list))
+	if (list_empty(&sem->wait_list)) {
+		/*
+		 * In case the wait queue is empty and the lock isn't owned
+		 * by a writer, this reader can exit the slowpath and return
+		 * immediately as its RWSEM_ACTIVE_READ_BIAS has already
+		 * been set in the count.
+		 */
+		if (atomic_long_read(&sem->count) >= 0) {
+			raw_spin_unlock_irq(&sem->wait_lock);
+			return sem;
+		}
+		adjustment += RWSEM_WAITING_BIAS;
+	}
 	list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* we're now waiting on the lock, but no longer actively locking */
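
The early exit added above keys off the sign of sem->count: the reader's fast path has already xadd'ed RWSEM_ACTIVE_READ_BIAS before falling into the slowpath, and with the wait queue empty (checked under wait_lock) a negative count can only come from a writer's RWSEM_ACTIVE_WRITE_BIAS, so count >= 0 means the reader already holds the lock. A small user-space sketch of that arithmetic, assuming the classic 64-bit bias values this file used; the demo mirrors the new test but is illustrative, not the kernel's code.

#include <stdio.h>

/* Assumed 64-bit bias values from this era of rwsem-xadd.c. */
#define RWSEM_ACTIVE_BIAS	1L
#define RWSEM_WAITING_BIAS	(-(1L << 32))
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
	/* Reader fast path: xadd has already added the read bias. */
	long count = RWSEM_ACTIVE_READ_BIAS;
	printf("reader, no writer: count=%ld -> %s\n", count,
	       count >= 0 ? "exit slowpath" : "queue and sleep");

	/* Same reader, but a writer owns the lock. */
	count = RWSEM_ACTIVE_WRITE_BIAS + RWSEM_ACTIVE_READ_BIAS;
	printf("reader vs writer:  count=%ld -> %s\n", count,
	       count >= 0 ? "exit slowpath" : "queue and sleep");
	return 0;
}

With an empty queue and no writer, taking the slowpath's usual route (adding RWSEM_WAITING_BIAS, sleeping, being woken) would be pure overhead, which is what the early return avoids.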