 fs/btrfs/locking.c | 80 ++++++++++++++++++++++++++++++++++++++++++++++++++----------------------------------
 1 file changed, 46 insertions(+), 34 deletions(-)
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 01277b8f2373..5665d2149249 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -33,14 +33,14 @@ static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
  */
 void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
 {
-        if (eb->lock_nested) {
-                read_lock(&eb->lock);
-                if (eb->lock_nested && current->pid == eb->lock_owner) {
-                        read_unlock(&eb->lock);
-                        return;
-                }
-                read_unlock(&eb->lock);
-        }
+        /*
+         * no lock is required.  The lock owner may change if
+         * we have a read lock, but it won't change to or away
+         * from us.  If we have the write lock, we are the owner
+         * and it'll never change.
+         */
+        if (eb->lock_nested && current->pid == eb->lock_owner)
+                return;
         if (rw == BTRFS_WRITE_LOCK) {
                 if (atomic_read(&eb->blocking_writers) == 0) {
                         WARN_ON(atomic_read(&eb->spinning_writers) != 1);
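Editor's note: the hunk above drops a read_lock()/read_unlock() pair around the nested-owner test in favour of a lockless check. Below is a minimal userspace sketch of the ownership rule the new comment describes; the demo_* names are illustrative, not kernel code.

/*
 * Sketch of the invariant: lock_owner is only ever set to a task's
 * own pid, by that task, while it holds the write lock.  So if the
 * field equals our pid we are the owner and nobody else may change
 * it; if it does not, it may change under us, but never *to* us.
 * Either way, comparing it with our pid needs no locking.
 */
#include <pthread.h>
#include <sys/types.h>
#include <unistd.h>

struct demo_eb {
        pthread_rwlock_t lock;
        pid_t lock_owner;       /* written only by the write-lock holder */
        int lock_nested;        /* meaningful only to the lock owner */
};

static int demo_is_nested_owner(const struct demo_eb *eb)
{
        /* Safe without taking eb->lock, per the rule above. */
        return eb->lock_nested && eb->lock_owner == getpid();
}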
@@ -65,14 +65,15 @@ void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
  */
 void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 {
-        if (eb->lock_nested) {
-                read_lock(&eb->lock);
-                if (eb->lock_nested && current->pid == eb->lock_owner) {
-                        read_unlock(&eb->lock);
-                        return;
-                }
-                read_unlock(&eb->lock);
-        }
+        /*
+         * no lock is required.  The lock owner may change if
+         * we have a read lock, but it won't change to or away
+         * from us.  If we have the write lock, we are the owner
+         * and it'll never change.
+         */
+        if (eb->lock_nested && current->pid == eb->lock_owner)
+                return;
+
         if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
                 BUG_ON(atomic_read(&eb->blocking_writers) != 1);
                 write_lock(&eb->lock);
@@ -99,6 +100,9 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 void btrfs_tree_read_lock(struct extent_buffer *eb)
 {
 again:
+        BUG_ON(!atomic_read(&eb->blocking_writers) &&
+               current->pid == eb->lock_owner);
+
         read_lock(&eb->lock);
         if (atomic_read(&eb->blocking_writers) &&
             current->pid == eb->lock_owner) {
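Editor's note: the new BUG_ON asserts an invariant rather than changing behaviour. If current->pid matches eb->lock_owner while no writer has gone blocking, the caller still holds the spinning write lock on eb->lock, and the read_lock() just below would deadlock against itself; crashing loudly is preferable to a silent lockup.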
@@ -132,7 +136,9 @@ int btrfs_try_tree_read_lock(struct extent_buffer *eb)
         if (atomic_read(&eb->blocking_writers))
                 return 0;
 
-        read_lock(&eb->lock);
+        if (!read_trylock(&eb->lock))
+                return 0;
+
         if (atomic_read(&eb->blocking_writers)) {
                 read_unlock(&eb->lock);
                 return 0;
@@ -151,7 +157,10 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
         if (atomic_read(&eb->blocking_writers) ||
             atomic_read(&eb->blocking_readers))
                 return 0;
-        write_lock(&eb->lock);
+
+        if (!write_trylock(&eb->lock))
+                return 0;
+
         if (atomic_read(&eb->blocking_writers) ||
             atomic_read(&eb->blocking_readers)) {
                 write_unlock(&eb->lock);
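Editor's note: both try-lock helpers above previously could still sleep on a contended eb->lock even though their contract is to fail rather than wait; switching to trylock lets them back off instead. A minimal userspace sketch of the same pattern using POSIX rwlocks and C11 atomics (demo_* names are ours, not btrfs's); note the re-check of the blocking counter after acquisition, since a writer may have gone blocking between the first check and the trylock.

#include <pthread.h>
#include <stdatomic.h>

struct demo_eb {
        pthread_rwlock_t lock;
        atomic_int blocking_writers;
};

/* Returns 1 with the read lock held, 0 without it; never sleeps. */
static int demo_try_read_lock(struct demo_eb *eb)
{
        if (atomic_load(&eb->blocking_writers))
                return 0;
        if (pthread_rwlock_tryrdlock(&eb->lock) != 0)
                return 0;       /* contended: fail fast */
        if (atomic_load(&eb->blocking_writers)) {
                pthread_rwlock_unlock(&eb->lock);
                return 0;       /* writer went blocking in between */
        }
        return 1;
}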
@@ -168,14 +177,15 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
  */
 void btrfs_tree_read_unlock(struct extent_buffer *eb)
 {
-        if (eb->lock_nested) {
-                read_lock(&eb->lock);
-                if (eb->lock_nested && current->pid == eb->lock_owner) {
-                        eb->lock_nested = 0;
-                        read_unlock(&eb->lock);
-                        return;
-                }
-                read_unlock(&eb->lock);
+        /*
+         * if we're nested, we have the write lock.  No new locking
+         * is needed as long as we are the lock owner.
+         * The write unlock will do a barrier for us, and the lock_nested
+         * field only matters to the lock owner.
+         */
+        if (eb->lock_nested && current->pid == eb->lock_owner) {
+                eb->lock_nested = 0;
+                return;
         }
         btrfs_assert_tree_read_locked(eb);
         WARN_ON(atomic_read(&eb->spinning_readers) == 0);
@@ -189,14 +199,15 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb)
  */
 void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
 {
-        if (eb->lock_nested) {
-                read_lock(&eb->lock);
-                if (eb->lock_nested && current->pid == eb->lock_owner) {
-                        eb->lock_nested = 0;
-                        read_unlock(&eb->lock);
-                        return;
-                }
-                read_unlock(&eb->lock);
+        /*
+         * if we're nested, we have the write lock.  No new locking
+         * is needed as long as we are the lock owner.
+         * The write unlock will do a barrier for us, and the lock_nested
+         * field only matters to the lock owner.
+         */
+        if (eb->lock_nested && current->pid == eb->lock_owner) {
+                eb->lock_nested = 0;
+                return;
         }
         btrfs_assert_tree_read_locked(eb);
         WARN_ON(atomic_read(&eb->blocking_readers) == 0);
@@ -244,6 +255,7 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
         BUG_ON(blockers > 1);
 
         btrfs_assert_tree_locked(eb);
+        eb->lock_owner = 0;
         atomic_dec(&eb->write_locks);
 
         if (blockers) {
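Editor's note: clearing eb->lock_owner while the write lock is still held keeps a stale pid out of the field. Without it, a task that had write-locked this buffer earlier could later re-take a read lock, find its own old pid still in lock_owner, and falsely trigger the new BUG_ON in btrfs_tree_read_lock or take the nested-owner fast path by mistake.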