Diffstat (limited to 'fs/btrfs/locking.c')
-rw-r--r--  fs/btrfs/locking.c  62
1 file changed, 28 insertions(+), 34 deletions(-)
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 2f6c3c7851ed..98fccce4208c 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -15,19 +15,19 @@
 #ifdef CONFIG_BTRFS_DEBUG
 static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
 {
-	WARN_ON(atomic_read(&eb->spinning_writers));
-	atomic_inc(&eb->spinning_writers);
+	WARN_ON(eb->spinning_writers);
+	eb->spinning_writers++;
 }
 
 static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
 {
-	WARN_ON(atomic_read(&eb->spinning_writers) != 1);
-	atomic_dec(&eb->spinning_writers);
+	WARN_ON(eb->spinning_writers != 1);
+	eb->spinning_writers--;
 }
 
 static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
 {
-	WARN_ON(atomic_read(&eb->spinning_writers));
+	WARN_ON(eb->spinning_writers);
 }
 
 static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
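What this hunk relies on: spinning_writers is only ever touched while eb->lock is held for writing, so the rwlock already serializes every access and the atomic was pure overhead. A minimal userspace sketch of the same invariant using POSIX threads; demo_eb and the demo_* names are invented for illustration, not kernel API:

#include <assert.h>
#include <pthread.h>

/* Illustrative stand-in for extent_buffer (not the kernel type). */
struct demo_eb {
	pthread_rwlock_t lock;
	int spinning_writers;	/* plain int: only touched under lock */
};

static void demo_write_lock(struct demo_eb *eb)
{
	pthread_rwlock_wrlock(&eb->lock);
	/* We hold the lock exclusively, so a non-atomic increment
	 * cannot race with any other reader or writer of the field. */
	assert(eb->spinning_writers == 0);
	eb->spinning_writers++;
}

static void demo_write_unlock(struct demo_eb *eb)
{
	assert(eb->spinning_writers == 1);
	eb->spinning_writers--;
	pthread_rwlock_unlock(&eb->lock);
}

int main(void)
{
	struct demo_eb eb = { .lock = PTHREAD_RWLOCK_INITIALIZER };
	demo_write_lock(&eb);
	demo_write_unlock(&eb);
	return 0;
}

The assert()s play the role of the WARN_ONs: they document the invariant the lock maintains rather than add any synchronization of their own.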
@@ -58,17 +58,17 @@ static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
 
 static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb)
 {
-	atomic_inc(&eb->write_locks);
+	eb->write_locks++;
 }
 
 static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb)
 {
-	atomic_dec(&eb->write_locks);
+	eb->write_locks--;
 }
 
 void btrfs_assert_tree_locked(struct extent_buffer *eb)
 {
-	BUG_ON(!atomic_read(&eb->write_locks));
+	BUG_ON(!eb->write_locks);
 }
 
 #else
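These helpers exist only under CONFIG_BTRFS_DEBUG; the #else this hunk ends at introduces the branch where production builds get do-nothing stubs, so the counting compiles away entirely. The shape of that pattern, sketched with an invented DEMO_DEBUG switch and the demo_eb type from above, assumed here to carry an int write_locks field as well:

#ifdef DEMO_DEBUG
static void demo_assert_write_locks_get(struct demo_eb *eb)
{
	eb->write_locks++;	/* counted only in debug builds */
}
#else
static void demo_assert_write_locks_get(struct demo_eb *eb) { }
#endif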
@@ -111,10 +111,10 @@ void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
 	 */
 	if (eb->lock_nested && current->pid == eb->lock_owner)
 		return;
-	if (atomic_read(&eb->blocking_writers) == 0) {
+	if (eb->blocking_writers == 0) {
 		btrfs_assert_spinning_writers_put(eb);
 		btrfs_assert_tree_locked(eb);
-		atomic_inc(&eb->blocking_writers);
+		eb->blocking_writers++;
 		write_unlock(&eb->lock);
 	}
 }
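btrfs uses the rwlock as a spinning lock and layers a "blocking" mode on top: a writer that needs to sleep publishes blocking_writers and drops the rwlock, and later lockers are diverted to a wait queue instead of spinning. The increment needs no atomic because it happens before write_unlock(), under full exclusion. A rough pthreads analogue; struct demo_tree and the demo_* names are invented, and the userspace version declares the counter atomic because portable C offers lockless readers no plain-int escape hatch the way the kernel's memory model does:

#include <pthread.h>
#include <stdatomic.h>

struct demo_tree {
	pthread_rwlock_t lock;
	/* Written only while 'lock' is write-held, read locklessly. */
	atomic_int blocking_writers;
	pthread_mutex_t wq_mtx;		/* protects the wait "queue" */
	pthread_cond_t write_lock_wq;	/* analogue of eb->write_lock_wq */
};

/* Analogue of btrfs_set_lock_blocking_write(): the caller holds the
 * write lock and wants to do slow work without stalling spinners. */
static void demo_set_lock_blocking_write(struct demo_tree *t)
{
	if (atomic_load(&t->blocking_writers) == 0) {
		atomic_store(&t->blocking_writers, 1);
		pthread_rwlock_unlock(&t->lock);
	}
}

The remaining sketches below reuse this demo_tree type.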
@@ -148,12 +148,11 @@ void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
 	 */
 	if (eb->lock_nested && current->pid == eb->lock_owner)
 		return;
-	BUG_ON(atomic_read(&eb->blocking_writers) != 1);
 	write_lock(&eb->lock);
+	BUG_ON(eb->blocking_writers != 1);
 	btrfs_assert_spinning_writers_get(eb);
-	/* atomic_dec_and_test implies a barrier */
-	if (atomic_dec_and_test(&eb->blocking_writers))
-		cond_wake_up_nomb(&eb->write_lock_wq);
+	if (--eb->blocking_writers == 0)
+		cond_wake_up(&eb->write_lock_wq);
 }
 
 /*
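The reordering here is forced by the type change: a plain blocking_writers may only be inspected once the rwlock is reacquired, so the BUG_ON moves below write_lock(). The wakeup helper changes too: atomic_dec_and_test() implied a full barrier, which is why the old code could use the no-barrier cond_wake_up_nomb(); a plain pre-decrement implies nothing, so the code switches to cond_wake_up(), whose sleeper check supplies the barrier. In the pthreads sketch the mutex/condvar pair provides that ordering for free:

/* Analogue of btrfs_clear_lock_blocking_write(): retake the rwlock,
 * drop the blocking state, and wake any sleepers. */
static void demo_clear_lock_blocking_write(struct demo_tree *t)
{
	pthread_rwlock_wrlock(&t->lock);
	pthread_mutex_lock(&t->wq_mtx);
	if (atomic_fetch_sub(&t->blocking_writers, 1) == 1)
		pthread_cond_broadcast(&t->write_lock_wq);
	pthread_mutex_unlock(&t->wq_mtx);
}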
@@ -167,12 +166,10 @@ void btrfs_tree_read_lock(struct extent_buffer *eb)
 	if (trace_btrfs_tree_read_lock_enabled())
 		start_ns = ktime_get_ns();
 again:
-	BUG_ON(!atomic_read(&eb->blocking_writers) &&
-	       current->pid == eb->lock_owner);
-
 	read_lock(&eb->lock);
-	if (atomic_read(&eb->blocking_writers) &&
-	    current->pid == eb->lock_owner) {
+	BUG_ON(eb->blocking_writers == 0 &&
+	       current->pid == eb->lock_owner);
+	if (eb->blocking_writers && current->pid == eb->lock_owner) {
 		/*
 		 * This extent is already write-locked by our thread. We allow
 		 * an additional read lock to be added because it's for the same
@@ -185,10 +182,10 @@ again:
 		trace_btrfs_tree_read_lock(eb, start_ns);
 		return;
 	}
-	if (atomic_read(&eb->blocking_writers)) {
+	if (eb->blocking_writers) {
 		read_unlock(&eb->lock);
 		wait_event(eb->write_lock_wq,
-			   atomic_read(&eb->blocking_writers) == 0);
+			   eb->blocking_writers == 0);
 		goto again;
 	}
 	btrfs_assert_tree_read_locks_get(eb);
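Both hunks of btrfs_tree_read_lock() implement a drop-and-retry loop: take the rwlock, and if a writer has gone blocking, release it, sleep on write_lock_wq until the count clears, and start over. (The pid == lock_owner branch handles read-after-write recursion by the same thread, which the sketch below omits.) Reusing the invented demo_tree type:

/* Analogue of btrfs_tree_read_lock()'s slow path. */
static void demo_tree_read_lock(struct demo_tree *t)
{
again:
	pthread_rwlock_rdlock(&t->lock);
	if (atomic_load(&t->blocking_writers)) {
		/* A writer went blocking: drop the rwlock and sleep
		 * until it finishes instead of spinning. */
		pthread_rwlock_unlock(&t->lock);
		pthread_mutex_lock(&t->wq_mtx);
		while (atomic_load(&t->blocking_writers))
			pthread_cond_wait(&t->write_lock_wq, &t->wq_mtx);
		pthread_mutex_unlock(&t->wq_mtx);
		goto again;
	}
	/* success: read lock held, no blocking writer */
}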
@@ -203,11 +200,11 @@ again:
  */
 int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
 {
-	if (atomic_read(&eb->blocking_writers))
+	if (eb->blocking_writers)
 		return 0;
 
 	read_lock(&eb->lock);
-	if (atomic_read(&eb->blocking_writers)) {
+	if (eb->blocking_writers) {
 		read_unlock(&eb->lock);
 		return 0;
 	}
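The _atomic variant is the no-sleep version of the same thing: a cheap unlocked peek to bail out early, then a mandatory recheck under the rwlock because a writer may have gone blocking in between. A sketch, again on demo_tree:

/* Analogue of btrfs_tree_read_lock_atomic(): never sleep, just fail. */
static int demo_tree_read_lock_atomic(struct demo_tree *t)
{
	if (atomic_load(&t->blocking_writers))	/* unlocked fast path */
		return 0;

	pthread_rwlock_rdlock(&t->lock);
	if (atomic_load(&t->blocking_writers)) {
		/* lost the race with a writer going blocking */
		pthread_rwlock_unlock(&t->lock);
		return 0;
	}
	return 1;	/* read lock held */
}

The two trylock variants that follow use this identical check/acquire/recheck shape, only swapping in read_trylock() or write_lock().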
@@ -223,13 +220,13 @@ int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
  */
 int btrfs_try_tree_read_lock(struct extent_buffer *eb)
 {
-	if (atomic_read(&eb->blocking_writers))
+	if (eb->blocking_writers)
 		return 0;
 
 	if (!read_trylock(&eb->lock))
 		return 0;
 
-	if (atomic_read(&eb->blocking_writers)) {
+	if (eb->blocking_writers) {
 		read_unlock(&eb->lock);
 		return 0;
 	}
@@ -245,13 +242,11 @@ int btrfs_try_tree_read_lock(struct extent_buffer *eb)
  */
 int btrfs_try_tree_write_lock(struct extent_buffer *eb)
 {
-	if (atomic_read(&eb->blocking_writers) ||
-	    atomic_read(&eb->blocking_readers))
+	if (eb->blocking_writers || atomic_read(&eb->blocking_readers))
 		return 0;
 
 	write_lock(&eb->lock);
-	if (atomic_read(&eb->blocking_writers) ||
-	    atomic_read(&eb->blocking_readers)) {
+	if (eb->blocking_writers || atomic_read(&eb->blocking_readers)) {
 		write_unlock(&eb->lock);
 		return 0;
 	}
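Worth noticing in this hunk is the asymmetry the conversion preserves: blocking_writers becomes a plain int because at most one thread, the write-lock holder, ever modifies it, while blocking_readers stays an atomic_t because any number of concurrent read-lock holders may bump it with no mutual exclusion between them. Hence the mixed condition, a bare read sitting next to an atomic_read().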
@@ -322,10 +317,9 @@ void btrfs_tree_lock(struct extent_buffer *eb)
 	WARN_ON(eb->lock_owner == current->pid);
 again:
 	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
-	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
+	wait_event(eb->write_lock_wq, eb->blocking_writers == 0);
 	write_lock(&eb->lock);
-	if (atomic_read(&eb->blocking_readers) ||
-	    atomic_read(&eb->blocking_writers)) {
+	if (atomic_read(&eb->blocking_readers) || eb->blocking_writers) {
 		write_unlock(&eb->lock);
 		goto again;
 	}
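btrfs_tree_lock() shows why the wait_event() calls are only hints: between the wakeup and write_lock() another task can slip in and go blocking again, so the authoritative test is repeated under the lock and failure loops back. A sketch on demo_tree, with the hypothetical demo_wait_for_writers() helper written out as the same condvar wait the read-lock sketch used inline:

static void demo_wait_for_writers(struct demo_tree *t)
{
	pthread_mutex_lock(&t->wq_mtx);
	while (atomic_load(&t->blocking_writers))
		pthread_cond_wait(&t->write_lock_wq, &t->wq_mtx);
	pthread_mutex_unlock(&t->wq_mtx);
}

/* Analogue of btrfs_tree_lock(): wait, acquire, recheck, retry. */
static void demo_tree_lock(struct demo_tree *t)
{
again:
	demo_wait_for_writers(t);
	pthread_rwlock_wrlock(&t->lock);
	if (atomic_load(&t->blocking_writers)) {
		/* a writer went blocking between the wait and the lock */
		pthread_rwlock_unlock(&t->lock);
		goto again;
	}
	/* write lock held */
}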
@@ -340,7 +334,7 @@ again:
  */
 void btrfs_tree_unlock(struct extent_buffer *eb)
 {
-	int blockers = atomic_read(&eb->blocking_writers);
+	int blockers = eb->blocking_writers;
 
 	BUG_ON(blockers > 1);
 
@@ -351,7 +345,7 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
 
 	if (blockers) {
 		btrfs_assert_no_spinning_writers(eb);
-		atomic_dec(&eb->blocking_writers);
+		eb->blocking_writers--;
 		/* Use the lighter barrier after atomic */
 		smp_mb__after_atomic();
 		cond_wake_up_nomb(&eb->write_lock_wq);
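This final decrement is the one place blocking_writers changes without eb->lock held, and it is still single-writer safe: BUG_ON(blockers > 1) pins the count to 0 or 1, and only the thread that set the flag ever clears it. What the explicit barrier buys is ordering against the waiter's recheck, roughly:

	waker: btrfs_tree_unlock()        waiter: wait_event() caller
	--------------------------        ---------------------------
	eb->blocking_writers--;           add itself to write_lock_wq
	smp_mb__after_atomic();           re-check blocking_writers
	if (waitqueue_active(wq))         sleep only if still nonzero
		wake_up(wq);

That ordering is why the no-barrier cond_wake_up_nomb() can stay here, unlike in btrfs_clear_lock_blocking_write() above. One caveat if you adapt the pattern: smp_mb__after_atomic() is specified as a companion to atomic RMW operations, and the comment above it predates the conversion; after a plain decrement its guarantees are architecture-dependent, so a full smp_mb() would be the conservative choice.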