about summary refs log tree commit diff stats
path: root/lib/rwsem.c
diff options
context:
space:
mode:
authorMichel Lespinasse <walken@google.com>2010-08-09 20:21:18 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2010-08-09 23:45:11 -0400
commitfd41b33435ada87323cc86b50959fbffe35192c8 (patch)
tree6d2a7e79557e5de21bc91fce5b939c1f7dc1355c /lib/rwsem.c
parent70bdc6e0644f3535e93bac5c364ca199397e507e (diff)
rwsem: let RWSEM_WAITING_BIAS represent any number of waiting threads
Previously each waiting thread added a bias of RWSEM_WAITING_BIAS. With this change, the bias is added only once to indicate that the wait list is non-empty. This has a few nice properties which will be used in following changes: - when the spinlock is held and the waiter list is known to be non-empty, count < RWSEM_WAITING_BIAS <=> there is an active writer on that sem - count == RWSEM_WAITING_BIAS <=> there are waiting threads and no active readers/writers on that sem Signed-off-by: Michel Lespinasse <walken@google.com> Acked-by: David Howells <dhowells@redhat.com> Cc: Mike Waychison <mikew@google.com> Cc: Suleiman Souhlal <suleiman@google.com> Cc: Ying Han <yinghan@google.com> Cc: Ingo Molnar <mingo@elte.hu> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'lib/rwsem.c')
-rw-r--r--lib/rwsem.c28
1 file changed, 17 insertions, 11 deletions
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 94f2d7a9dc4f..a3e68bf5932e 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -60,7 +60,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
60 struct rwsem_waiter *waiter; 60 struct rwsem_waiter *waiter;
61 struct task_struct *tsk; 61 struct task_struct *tsk;
62 struct list_head *next; 62 struct list_head *next;
63 signed long oldcount, woken, loop; 63 signed long oldcount, woken, loop, adjustment;
64 64
65 waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); 65 waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
66 if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE)) 66 if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
@@ -73,9 +73,12 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
73 * write lock. However, we only wake this writer if we can transition 73 * write lock. However, we only wake this writer if we can transition
74 * the active part of the count from 0 -> 1 74 * the active part of the count from 0 -> 1
75 */ 75 */
76 adjustment = RWSEM_ACTIVE_WRITE_BIAS;
77 if (waiter->list.next == &sem->wait_list)
78 adjustment -= RWSEM_WAITING_BIAS;
79
76 try_again_write: 80 try_again_write:
77 oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem) 81 oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
78 - RWSEM_ACTIVE_BIAS;
79 if (oldcount & RWSEM_ACTIVE_MASK) 82 if (oldcount & RWSEM_ACTIVE_MASK)
80 /* Someone grabbed the sem already */ 83 /* Someone grabbed the sem already */
81 goto undo_write; 84 goto undo_write;
@@ -128,13 +131,15 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
128 131
129 } while (waiter->flags & RWSEM_WAITING_FOR_READ); 132 } while (waiter->flags & RWSEM_WAITING_FOR_READ);
130 133
131 loop = woken; 134 adjustment = woken * RWSEM_ACTIVE_READ_BIAS;
132 woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS; 135 if (waiter->flags & RWSEM_WAITING_FOR_READ)
136 /* hit end of list above */
137 adjustment -= RWSEM_WAITING_BIAS;
133 138
134 rwsem_atomic_add(woken, sem); 139 rwsem_atomic_add(adjustment, sem);
135 140
136 next = sem->wait_list.next; 141 next = sem->wait_list.next;
137 for (; loop > 0; loop--) { 142 for (loop = woken; loop > 0; loop--) {
138 waiter = list_entry(next, struct rwsem_waiter, list); 143 waiter = list_entry(next, struct rwsem_waiter, list);
139 next = waiter->list.next; 144 next = waiter->list.next;
140 tsk = waiter->task; 145 tsk = waiter->task;
@@ -153,7 +158,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
153 /* undo the change to the active count, but check for a transition 158 /* undo the change to the active count, but check for a transition
154 * 1->0 */ 159 * 1->0 */
155 undo_write: 160 undo_write:
156 if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK) 161 if (rwsem_atomic_update(-adjustment, sem) & RWSEM_ACTIVE_MASK)
157 goto out; 162 goto out;
158 goto try_again_write; 163 goto try_again_write;
159} 164}
@@ -175,6 +180,8 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
175 waiter->task = tsk; 180 waiter->task = tsk;
176 get_task_struct(tsk); 181 get_task_struct(tsk);
177 182
183 if (list_empty(&sem->wait_list))
184 adjustment += RWSEM_WAITING_BIAS;
178 list_add_tail(&waiter->list, &sem->wait_list); 185 list_add_tail(&waiter->list, &sem->wait_list);
179 186
180 /* we're now waiting on the lock, but no longer actively locking */ 187 /* we're now waiting on the lock, but no longer actively locking */
@@ -208,8 +215,7 @@ rwsem_down_read_failed(struct rw_semaphore *sem)
208 struct rwsem_waiter waiter; 215 struct rwsem_waiter waiter;
209 216
210 waiter.flags = RWSEM_WAITING_FOR_READ; 217 waiter.flags = RWSEM_WAITING_FOR_READ;
211 rwsem_down_failed_common(sem, &waiter, 218 rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_READ_BIAS);
212 RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);
213 return sem; 219 return sem;
214} 220}
215 221
@@ -222,7 +228,7 @@ rwsem_down_write_failed(struct rw_semaphore *sem)
222 struct rwsem_waiter waiter; 228 struct rwsem_waiter waiter;
223 229
224 waiter.flags = RWSEM_WAITING_FOR_WRITE; 230 waiter.flags = RWSEM_WAITING_FOR_WRITE;
225 rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS); 231 rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_WRITE_BIAS);
226 232
227 return sem; 233 return sem;
228} 234}