author    Chris Mason <chris.mason@oracle.com>    2009-02-12 14:09:45 -0500
committer Chris Mason <chris.mason@oracle.com>    2009-02-12 14:09:45 -0500
commit    4008c04a07c73ec3cb1be4c1391d2159a8f75d6d (patch)
tree      c0b10a7287ac810bfc406541e32b850ad4580248 /fs/btrfs/locking.c
parent    3f3420df505e47751ef76a652b5cb660e5360d6f (diff)
Btrfs: make a lockdep class for the extent buffer locks
Btrfs is currently using spin_lock_nested with a nested value based on the tree depth of the block. But this doesn't quite work, because the max tree depth is bigger than what spin_lock_nested can deal with, and because locks are sometimes taken before the level field is filled in.

The solution here is to use lockdep_set_class_and_name instead, and to set the class before unlocking the pages when the block is read from disk and just after init of a freshly allocated tree block.

btrfs_clear_path_blocking is also changed to take the locks in the proper order, and it also makes sure all the locks currently held are properly set to blocking before it tries to retake the spinlocks. Otherwise, lockdep gets upset about bad lock ordering.

The lockdep magic came from Peter Zijlstra <peterz@infradead.org>

Signed-off-by: Chris Mason <chris.mason@oracle.com>
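For illustration (the class assignment itself lands outside this file, so it is not in the hunk below): a minimal sketch of the lockdep_set_class_and_name approach the message describes. The helper and array names here (btrfs_set_buffer_lockdep_class, btrfs_eb_class, btrfs_eb_name) are inferred from the description rather than quoted from this diff.

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * One static lockdep class and name per tree level.  This avoids
 * spin_lock_nested's subclass limit (MAX_LOCKDEP_SUBCLASSES is 8,
 * too small for BTRFS_MAX_LEVEL) and does not depend on eb->level
 * being valid at lock time, since the class is assigned explicitly
 * once the level is known.
 */
static struct lock_class_key btrfs_eb_class[BTRFS_MAX_LEVEL + 1];
static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = {
	"btrfs-extent-00",	/* leaf */
	"btrfs-extent-01",
	/* ... one name per level ... */
};

/*
 * Call this once the block's level is known: before unlocking the
 * pages after a read from disk, or right after initializing a freshly
 * allocated tree block.
 */
void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level)
{
	lockdep_set_class_and_name(&eb->lock,
				   &btrfs_eb_class[level],
				   btrfs_eb_name[level]);
}
#endif

With one class per level, lockdep can tell locks at different depths apart and flag inversions of the root-to-leaf locking order without calling btrfs_header_level() on every lock acquisition. A companion sketch of the reordered btrfs_clear_path_blocking follows the diffstat below.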
Diffstat (limited to 'fs/btrfs/locking.c')
-rw-r--r--  fs/btrfs/locking.c | 11 -----------
1 file changed, 0 insertions(+), 11 deletions(-)
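Also from the message, under the same caveat (this function lives outside locking.c, and the body below is a sketch inferred from the description, not quoted from the commit): btrfs_clear_path_blocking retaking the spinlocks in an order lockdep accepts.

/*
 * Retake spinlocks for every node on the path.  Force all currently
 * held locks to blocking first, then reacquire the spinlocks strictly
 * top-down (highest level first) so the order matches the tree's
 * normal root-to-leaf descent and lockdep stays happy.
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held)
{
	int i;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* nothing may still be spinning when we start retaking locks */
	if (held)
		btrfs_set_lock_blocking(held);
	btrfs_set_path_blocking(p);
#endif

	/* spin again, from the top of the tree downward */
	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i])
			btrfs_clear_lock_blocking(p->nodes[i]);
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking(held);
#endif
}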
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 9ebe9385129b..85506c4a3af7 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -25,21 +25,10 @@
 #include "extent_io.h"
 #include "locking.h"
 
-/*
- * btrfs_header_level() isn't free, so don't call it when lockdep isn't
- * on
- */
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-static inline void spin_nested(struct extent_buffer *eb)
-{
-	spin_lock_nested(&eb->lock, BTRFS_MAX_LEVEL - btrfs_header_level(eb));
-}
-#else
 static inline void spin_nested(struct extent_buffer *eb)
 {
 	spin_lock(&eb->lock);
 }
-#endif
 
 /*
  * Setting a lock to blocking will drop the spinlock and set the