author | Michel Lespinasse <walken@google.com> | 2013-05-07 09:45:51 -0400
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-05-07 10:20:16 -0400
commit | 1e78277ccbbb48af32a618d1ef0e8534e0b648d7 (patch) |
tree | 033542c27093b7a24131bee12e84e8ca18cb19dc /lib |
parent | f7dd1cee9a4e2b1450e4a3732636dfbf28562ee4 (diff) |
rwsem: move rwsem_down_failed_common code into rwsem_down_{read,write}_failed
Remove the rwsem_down_failed_common function and replace it with two
identical copies of its code in rwsem_down_{read,write}_failed.
This is because we want to make different optimizations in
rwsem_down_{read,write}_failed; we are adding this pure-duplication
step as a separate commit in order to make it easier to check the
following steps.
Signed-off-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Peter Hurley <peter@hurleysoftware.com>
Acked-by: Davidlohr Bueso <davidlohr.bueso@hp.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
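The commit follows a common preparatory refactoring pattern: a shared helper that takes behaviour parameters is inlined into each of its callers, with the former parameters becoming local constants, so that the resulting copies are free to be optimized independently in later patches. The following is a minimal standalone sketch of that pattern only, using hypothetical names and constants (`down_failed_common`, `READ_BIAS`, etc.); it is not the kernel code, which appears in the diff below.

```c
#include <stdio.h>

/* Hypothetical stand-ins for the real bias constants; illustration only. */
#define READ_BIAS  1L
#define WRITE_BIAS 2L

/* Before: one shared slow path, parameterized by the caller. */
static long down_failed_common(long adjustment)
{
        /* ...queueing and sleeping logic shared by readers and writers... */
        return adjustment;
}

/*
 * After: each caller carries its own copy of the body, with the former
 * parameter turned into a local constant, so the two copies can diverge
 * in later patches.
 */
static long down_read_failed_copy(void)
{
        long adjustment = -READ_BIAS;
        /* ...duplicated queueing and sleeping logic... */
        return adjustment;
}

static long down_write_failed_copy(void)
{
        long adjustment = -WRITE_BIAS;
        /* ...duplicated queueing and sleeping logic... */
        return adjustment;
}

int main(void)
{
        /* Both forms compute the same result; only the structure differs. */
        printf("%ld %ld\n", down_failed_common(-READ_BIAS), down_read_failed_copy());
        printf("%ld %ld\n", down_failed_common(-WRITE_BIAS), down_write_failed_copy());
        return 0;
}
```

In the actual patch, rwsem_down_read_failed() and rwsem_down_write_failed() each absorb the body of rwsem_down_failed_common(), with `type` and `adjustment` becoming local variables, as the diff below shows.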
Diffstat (limited to 'lib')
-rw-r--r-- | lib/rwsem.c | 72
1 file changed, 57 insertions(+), 15 deletions(-)
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 40636454cf3c..fb658af1c12c 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -178,12 +178,12 @@ try_again_write:
 }
 
 /*
- * wait for a lock to be granted
+ * wait for the read lock to be granted
  */
-static struct rw_semaphore __sched *
-rwsem_down_failed_common(struct rw_semaphore *sem,
-                         enum rwsem_waiter_type type, signed long adjustment)
+struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
 {
+        enum rwsem_waiter_type type = RWSEM_WAITING_FOR_READ;
+        signed long adjustment = -RWSEM_ACTIVE_READ_BIAS;
         struct rwsem_waiter waiter;
         struct task_struct *tsk = current;
         signed long count;
@@ -238,21 +238,63 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 }
 
 /*
- * wait for the read lock to be granted
- */
-struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
-{
-        return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_READ,
-                                        -RWSEM_ACTIVE_READ_BIAS);
-}
-
-/*
  * wait for the write lock to be granted
  */
 struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 {
-        return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_WRITE,
-                                        -RWSEM_ACTIVE_WRITE_BIAS);
+        enum rwsem_waiter_type type = RWSEM_WAITING_FOR_WRITE;
+        signed long adjustment = -RWSEM_ACTIVE_WRITE_BIAS;
+        struct rwsem_waiter waiter;
+        struct task_struct *tsk = current;
+        signed long count;
+
+        /* set up my own style of waitqueue */
+        waiter.task = tsk;
+        waiter.type = type;
+        get_task_struct(tsk);
+
+        raw_spin_lock_irq(&sem->wait_lock);
+        if (list_empty(&sem->wait_list))
+                adjustment += RWSEM_WAITING_BIAS;
+        list_add_tail(&waiter.list, &sem->wait_list);
+
+        /* we're now waiting on the lock, but no longer actively locking */
+        count = rwsem_atomic_update(adjustment, sem);
+
+        /* If there are no active locks, wake the front queued process(es) up.
+         *
+         * Alternatively, if we're called from a failed down_write(), there
+         * were already threads queued before us and there are no active
+         * writers, the lock must be read owned; so we try to wake any read
+         * locks that were queued ahead of us. */
+        if (count == RWSEM_WAITING_BIAS)
+                sem = __rwsem_do_wake(sem, RWSEM_WAKE_NO_ACTIVE);
+        else if (count > RWSEM_WAITING_BIAS &&
+                 adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
+                sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
+
+        raw_spin_unlock_irq(&sem->wait_lock);
+
+        /* wait to be given the lock */
+        while (true) {
+                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+                if (!waiter.task)
+                        break;
+
+                raw_spin_lock_irq(&sem->wait_lock);
+                /* Try to get the writer sem, may steal from the head writer: */
+                if (type == RWSEM_WAITING_FOR_WRITE)
+                        if (try_get_writer_sem(sem, &waiter)) {
+                                raw_spin_unlock_irq(&sem->wait_lock);
+                                return sem;
+                        }
+                raw_spin_unlock_irq(&sem->wait_lock);
+                schedule();
+        }
+
+        tsk->state = TASK_RUNNING;
+
+        return sem;
 }
 
 /*