Diffstat (limited to 'fs/btrfs/locking.c')
-rw-r--r--  fs/btrfs/locking.c  33
1 file changed, 0 insertions(+), 33 deletions(-)
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 68fd9ccf1805..85506c4a3af7 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -25,21 +25,10 @@
 #include "extent_io.h"
 #include "locking.h"
 
-/*
- * btrfs_header_level() isn't free, so don't call it when lockdep isn't
- * on
- */
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-static inline void spin_nested(struct extent_buffer *eb)
-{
-	spin_lock_nested(&eb->lock, BTRFS_MAX_LEVEL - btrfs_header_level(eb));
-}
-#else
 static inline void spin_nested(struct extent_buffer *eb)
 {
 	spin_lock(&eb->lock);
 }
-#endif
 
 /*
  * Setting a lock to blocking will drop the spinlock and set the
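
The hunk above drops the lockdep-only variant of spin_nested(), which gave each tree level a distinct lockdep subclass derived from btrfs_header_level(). For context, here is a minimal sketch (not part of this commit) of what the spin_lock_nested() annotation buys: taking two locks of the same lock class normally looks like a self-deadlock to lockdep unless each nesting level is tagged with its own subclass. The struct and walk below are hypothetical; only the locking calls are real kernel APIs.

#include <linux/spinlock.h>

/* Hypothetical two-level structure; both locks share one lock class. */
struct demo_node {
	spinlock_t lock;
	struct demo_node *child;
};

static void demo_lock_parent_child(struct demo_node *parent)
{
	struct demo_node *child = parent->child;

	spin_lock(&parent->lock);	/* subclass 0 (the default) */
	/*
	 * Same lock class, nested acquisition: pass a distinct
	 * subclass so lockdep treats this as a separate nesting
	 * level rather than a recursive deadlock.  Subclasses must
	 * stay below MAX_LOCKDEP_SUBCLASSES, which is why the
	 * removed code mapped tree levels into a small fixed range.
	 */
	spin_lock_nested(&child->lock, 1);

	/* ... operate on both nodes ... */

	spin_unlock(&child->lock);
	spin_unlock(&parent->lock);
}

When lockdep is off, spin_lock_nested() has no benefit, which is why the removed #ifdef kept the plain spin_lock() path for non-debug builds.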
@@ -236,25 +225,3 @@ int btrfs_tree_locked(struct extent_buffer *eb)
 	return test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags) ||
 		spin_is_locked(&eb->lock);
 }
-
-/*
- * btrfs_search_slot uses this to decide if it should drop its locks
- * before doing something expensive like allocating free blocks for cow.
- */
-int btrfs_path_lock_waiting(struct btrfs_path *path, int level)
-{
-	int i;
-	struct extent_buffer *eb;
-
-	for (i = level; i <= level + 1 && i < BTRFS_MAX_LEVEL; i++) {
-		eb = path->nodes[i];
-		if (!eb)
-			break;
-		smp_mb();
-		if (spin_is_contended(&eb->lock) ||
-		    waitqueue_active(&eb->lock_wq))
-			return 1;
-	}
-	return 0;
-}
-
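
The second hunk removes btrfs_path_lock_waiting() entirely. Its removed comment describes the intended pattern: btrfs_search_slot() could poll for contention on the current and parent level before something expensive like allocating blocks for COW, and drop its locks first so waiters make progress (the smp_mb() before waitqueue_active() is needed because that is an unlocked check and could otherwise miss a waiter that was just queued). Below is a hedged sketch of such a caller; it is invented for illustration, not code from btrfs_search_slot(), and release_path_locks() is a hypothetical helper.

#include <linux/errno.h>

/* struct btrfs_path is assumed from btrfs's ctree.h. */
static int maybe_cow_block(struct btrfs_path *path, int level)
{
	if (btrfs_path_lock_waiting(path, level)) {
		/*
		 * Someone is spinning on or sleeping for our locks:
		 * drop them and ask the caller to restart the search
		 * rather than holding them across the expensive step.
		 */
		release_path_locks(path);	/* hypothetical unlock helper */
		return -EAGAIN;
	}

	/* ... allocate free blocks for COW while still locked ... */
	return 0;
}

With no remaining callers, the helper was dead code, which matches the diffstat: 33 lines deleted, nothing added.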