| author | Michel Lespinasse <walken@google.com> | 2010-08-09 20:21:15 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-09 23:45:10 -0400 |
| commit | 345af7bf3304410634c21ada4664fda83d4d9a16 (patch) | |
| tree | 1fd8b49ef79796e561cce19794af371099f5e7ca /lib/rwsem.c | |
| parent | 7721fea3d0fd93fb4d000eb737b444369358d6d3 (diff) | |
rwsem: fully separate code paths to wake writers vs readers
This is in preparation for later changes in the series.
In __rwsem_do_wake(), the first queued waiter is checked first in order to
determine whether it's a writer or a reader. The code paths diverge at
this point. The code that checks and increments the rwsem active count is
duplicated on both sides - the point is that later changes in the series
will be able to independently modify both sides.
Signed-off-by: Michel Lespinasse <walken@google.com>
Acked-by: David Howells <dhowells@redhat.com>
Cc: Mike Waychison <mikew@google.com>
Cc: Suleiman Souhlal <suleiman@google.com>
Cc: Ying Han <yinghan@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
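For readers following the restructuring, here is a minimal, self-contained userspace sketch of the control flow this patch produces; it is not the kernel code. The first waiter's type is checked once up front, and the writer and reader paths then each perform their own "active count 0 -> 1" claim, as the commit message describes. The names `sem_count`, `ACTIVE_BIAS`, `ACTIVE_MASK` and `first_waiter_is_writer()` are illustrative stand-ins for `sem->count`, `RWSEM_ACTIVE_BIAS`, `RWSEM_ACTIVE_MASK` and the `RWSEM_WAITING_FOR_WRITE` flag test; walking the wait list and waking tasks is omitted.

```c
/* Editor's sketch (not kernel code): model of the split wake paths. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define ACTIVE_BIAS 0x00000001L
#define ACTIVE_MASK 0x0000ffffL

static atomic_long sem_count;   /* models sem->count */
static bool downgrading;        /* models the downgrading argument */

/* Stub: models the RWSEM_WAITING_FOR_WRITE test on the first queued waiter. */
static bool first_waiter_is_writer(void) { return true; }

static void do_wake(void)
{
        long oldcount;

        if (!first_waiter_is_writer())
                goto readers_only;

        if (downgrading)
                return;                 /* never wake a writer on downgrade */

        /* Writer path: try to move the active part of the count 0 -> 1. */
try_again_write:
        oldcount = atomic_fetch_add(&sem_count, ACTIVE_BIAS);
        if (oldcount & ACTIVE_MASK)
                goto undo_write;        /* someone grabbed the sem already */
        puts("wake one writer");
        return;

readers_only:
        if (!downgrading) {
                /* Reader path makes the same 0 -> 1 claim, independently
                 * (its retry loop is omitted here for brevity). */
                oldcount = atomic_fetch_add(&sem_count, ACTIVE_BIAS);
                if (oldcount & ACTIVE_MASK) {
                        atomic_fetch_sub(&sem_count, ACTIVE_BIAS);
                        return;
                }
        }
        puts("wake all readers at the front of the queue");
        return;

undo_write:
        /* Undo the speculative increment; retry if the active part hit 0. */
        if ((atomic_fetch_sub(&sem_count, ACTIVE_BIAS) - ACTIVE_BIAS) & ACTIVE_MASK)
                return;
        goto try_again_write;
}

int main(void)
{
        atomic_init(&sem_count, 0);
        downgrading = false;
        do_wake();
        return 0;
}
```

The point of the duplication is visible in the sketch: once the claim logic exists separately on the writer and reader sides, later patches can tune either side without touching the other.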
Diffstat (limited to 'lib/rwsem.c')

-rw-r--r--  lib/rwsem.c | 61

1 file changed, 34 insertions, 27 deletions
diff --git a/lib/rwsem.c b/lib/rwsem.c
index ceba8e28807a..917fd946b495 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -41,7 +41,7 @@ struct rwsem_waiter {
  * - if we come here from up_xxxx(), then:
  *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
  *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
  * - there must be someone on the queue
  * - the spinlock must be held by the caller
  * - woken process blocks are discarded from the list after having task zeroed
  * - writers are only woken if downgrading is false
@@ -54,26 +54,23 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
         struct list_head *next;
         signed long oldcount, woken, loop;
 
+        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
+        if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
+                goto readers_only;
+
         if (downgrading)
-                goto dont_wake_writers;
+                goto out;
 
-        /* if we came through an up_xxxx() call, we only only wake someone up
-         * if we can transition the active part of the count from 0 -> 1
+        /* There's a writer at the front of the queue - try to grant it the
+         * write lock.  However, we only wake this writer if we can transition
+         * the active part of the count from 0 -> 1
          */
- try_again:
+ try_again_write:
         oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
                                                 - RWSEM_ACTIVE_BIAS;
         if (oldcount & RWSEM_ACTIVE_MASK)
-                goto undo;
-
-        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
-
-        /* try to grant a single write lock if there's a writer at the front
-         * of the queue - note we leave the 'active part' of the count
-         * incremented by 1 and the waiting part incremented by 0x00010000
-         */
-        if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
-                goto readers_only;
+                /* Someone grabbed the sem already */
+                goto undo_write;
 
         /* We must be careful not to touch 'waiter' after we set ->task = NULL.
          * It is an allocated on the waiter's stack and may become invalid at
@@ -87,18 +84,24 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
         put_task_struct(tsk);
         goto out;
 
-        /* don't want to wake any writers */
- dont_wake_writers:
-        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
-        if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
-                goto out;
+ readers_only:
+        if (downgrading)
+                goto wake_readers;
+
+        /* if we came through an up_xxxx() call, we only only wake someone up
+         * if we can transition the active part of the count from 0 -> 1 */
+ try_again_read:
+        oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
+                                                - RWSEM_ACTIVE_BIAS;
+        if (oldcount & RWSEM_ACTIVE_MASK)
+                /* Someone grabbed the sem already */
+                goto undo_read;
 
-        /* grant an infinite number of read locks to the readers at the front
-         * of the queue
-         * - note we increment the 'active part' of the count by the number of
-         *   readers before waking any processes up
+ wake_readers:
+        /* Grant an infinite number of read locks to the readers at the front
+         * of the queue.  Note we increment the 'active part' of the count by
+         * the number of readers before waking any processes up.
          */
- readers_only:
         woken = 0;
         do {
                 woken++;
@@ -138,10 +141,14 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
 
         /* undo the change to the active count, but check for a transition
          * 1->0 */
- undo:
+ undo_write:
+        if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK)
+                goto out;
+        goto try_again_write;
+ undo_read:
         if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK)
                 goto out;
-        goto try_again;
+        goto try_again_read;
 }
 
 /*
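Both halves of the patch end with the same claim/undo/retry pattern (try_again_write/undo_write and try_again_read/undo_read). The sketch below is an editor's userspace model of that pattern with C11 atomics, not the kernel implementation: `atomic_add_return()` here is a small stand-in with the same "return the new value" contract that the diff relies on from `rwsem_atomic_update()`, and `ACTIVE_BIAS`/`ACTIVE_MASK` stand in for the RWSEM constants.

```c
/* Editor's sketch (not kernel code): speculative claim of the active count. */
#include <stdatomic.h>
#include <stdbool.h>

#define ACTIVE_BIAS 0x00000001L
#define ACTIVE_MASK 0x0000ffffL

/* Stand-in for the kernel helper: add delta and return the new count. */
static long atomic_add_return(long delta, atomic_long *count)
{
        return atomic_fetch_add(count, delta) + delta;
}

/*
 * Try to take the 'active' part of the count from 0 to 1.  Returns true if
 * the caller now owns the wakeup; false if another task is already active
 * and will be responsible for any further wakeups.
 */
bool try_claim_wakeup(atomic_long *count)
{
        for (;;) {
                long newcount = atomic_add_return(ACTIVE_BIAS, count);

                if (!((newcount - ACTIVE_BIAS) & ACTIVE_MASK))
                        return true;    /* 0 -> 1 transition succeeded */

                /* Someone was already active: undo the speculative bias. */
                newcount = atomic_add_return(-ACTIVE_BIAS, count);
                if (newcount & ACTIVE_MASK)
                        return false;   /* they will handle the wakeup */

                /* The active part fell back to 0 under us, so nobody else
                 * is left to wake the waiters: retry the claim. */
        }
}
```

The retry in the undo path exists because the last active holder may release the lock between the claim and the undo; in that window no other task would wake the queued waiters, so the claimant must try again rather than simply back out.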