author     Chris Mason <chris.mason@oracle.com>  2009-02-12 14:09:45 -0500
committer  Chris Mason <chris.mason@oracle.com>  2009-02-12 14:09:45 -0500
commit     4008c04a07c73ec3cb1be4c1391d2159a8f75d6d (patch)
tree       c0b10a7287ac810bfc406541e32b850ad4580248 /fs/btrfs
parent     3f3420df505e47751ef76a652b5cb660e5360d6f (diff)
Btrfs: make a lockdep class for the extent buffer locks
Btrfs is currently using spin_lock_nested with a nested value based on the tree depth of the block. But this doesn't quite work, because the max tree depth is bigger than what spin_lock_nested can deal with, and because locks are sometimes taken before the level field is filled in.

The solution here is to use lockdep_set_class_and_name instead, and to set the class before unlocking the pages when the block is read from the disk, and just after init of a freshly allocated tree block.

btrfs_clear_path_blocking is also changed to take the locks in the proper order, and it now makes sure all the locks currently held are properly set to blocking before it tries to retake the spinlocks. Otherwise, lockdep gets upset about bad lock ordering.

The lockdep magic came from Peter Zijlstra <peterz@infradead.org>

Signed-off-by: Chris Mason <chris.mason@oracle.com>
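A note on the lockdep API the patch relies on: lockdep identifies a lock by its class, and every lock initialized at the same place in the code normally shares one class, so taking a child block's lock while the parent's lock is held looks to lockdep like recursive locking on a single class. Giving each tree level its own class removes that false positive. The sketch below is illustrative only; the demo_* names are hypothetical and not part of this patch, which applies the same pattern to struct extent_buffer in disk-io.c.

#include <linux/spinlock.h>
#include <linux/lockdep.h>

#define DEMO_MAX_LEVEL 8

/* one lockdep class and name per possible tree level, leaf == level 0 */
static struct lock_class_key demo_level_class[DEMO_MAX_LEVEL + 1];
static const char *demo_level_name[DEMO_MAX_LEVEL + 1] = {
	"demo-level-00", "demo-level-01", "demo-level-02",
	"demo-level-03", "demo-level-04", "demo-level-05",
	"demo-level-06", "demo-level-07", "demo-level-08",
};

struct demo_node {
	spinlock_t lock;
	int level;
};

static void demo_node_init(struct demo_node *node, int level)
{
	spin_lock_init(&node->lock);
	node->level = level;
	/* override the default class that spin_lock_init() assigned */
	lockdep_set_class_and_name(&node->lock,
				   &demo_level_class[level],
				   demo_level_name[level]);
}

/* locking parent then child now nests two distinct lockdep classes */
static void demo_lock_pair(struct demo_node *parent, struct demo_node *child)
{
	spin_lock(&parent->lock);
	spin_lock(&child->lock);
	/* ... work on both nodes ... */
	spin_unlock(&child->lock);
	spin_unlock(&parent->lock);
}

The class has to be attached before the lock is first taken with lockdep enabled, which is why the diff below sets it right after a block passes checksum validation on read (before its pages are unlocked) and right after a fresh tree block is initialized.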
Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/ctree.c        45
-rw-r--r--  fs/btrfs/ctree.h        10
-rw-r--r--  fs/btrfs/disk-io.c      46
-rw-r--r--  fs/btrfs/disk-io.h      10
-rw-r--r--  fs/btrfs/extent-tree.c   7
-rw-r--r--  fs/btrfs/locking.c      11
-rw-r--r--  fs/btrfs/volumes.c       2
7 files changed, 99 insertions, 32 deletions
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index c8f4c540cc2c..42491d728e99 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -62,14 +62,38 @@ noinline void btrfs_set_path_blocking(struct btrfs_path *p)
 
 /*
  * reset all the locked nodes in the patch to spinning locks.
+ *
+ * held is used to keep lockdep happy, when lockdep is enabled
+ * we set held to a blocking lock before we go around and
+ * retake all the spinlocks in the path. You can safely use NULL
+ * for held
  */
-noinline void btrfs_clear_path_blocking(struct btrfs_path *p)
+noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
+					struct extent_buffer *held)
 {
 	int i;
-	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/* lockdep really cares that we take all of these spinlocks
+	 * in the right order. If any of the locks in the path are not
+	 * currently blocking, it is going to complain. So, make really
+	 * really sure by forcing the path to blocking before we clear
+	 * the path blocking.
+	 */
+	if (held)
+		btrfs_set_lock_blocking(held);
+	btrfs_set_path_blocking(p);
+#endif
+
+	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
 		if (p->nodes[i] && p->locks[i])
 			btrfs_clear_lock_blocking(p->nodes[i]);
 	}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	if (held)
+		btrfs_clear_lock_blocking(held);
+#endif
 }
 
 /* this also releases the path */
@@ -279,7 +303,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 					    trans->transid, level, &ins);
 		BUG_ON(ret);
 		cow = btrfs_init_new_buffer(trans, root, prealloc_dest,
-					    buf->len);
+					    buf->len, level);
 	} else {
 		cow = btrfs_alloc_free_block(trans, root, buf->len,
 					     parent_start,
@@ -1559,7 +1583,7 @@ cow_done:
 		if (!p->skip_locking)
 			p->locks[level] = 1;
 
-		btrfs_clear_path_blocking(p);
+		btrfs_clear_path_blocking(p, NULL);
 
 		/*
 		 * we have a lock on b and as long as we aren't changing
@@ -1598,7 +1622,7 @@ cow_done:
 
 			btrfs_set_path_blocking(p);
 			sret = split_node(trans, root, p, level);
-			btrfs_clear_path_blocking(p);
+			btrfs_clear_path_blocking(p, NULL);
 
 			BUG_ON(sret > 0);
 			if (sret) {
@@ -1618,7 +1642,7 @@ cow_done:
 
 			btrfs_set_path_blocking(p);
 			sret = balance_level(trans, root, p, level);
-			btrfs_clear_path_blocking(p);
+			btrfs_clear_path_blocking(p, NULL);
 
 			if (sret) {
 				ret = sret;
@@ -1681,13 +1705,13 @@ cow_done:
 			if (!p->skip_locking) {
 				int lret;
 
-				btrfs_clear_path_blocking(p);
+				btrfs_clear_path_blocking(p, NULL);
 				lret = btrfs_try_spin_lock(b);
 
 				if (!lret) {
 					btrfs_set_path_blocking(p);
 					btrfs_tree_lock(b);
-					btrfs_clear_path_blocking(p);
+					btrfs_clear_path_blocking(p, b);
 				}
 			}
 		} else {
@@ -1699,7 +1723,7 @@ cow_done:
 				btrfs_set_path_blocking(p);
 				sret = split_leaf(trans, root, key,
 						  p, ins_len, ret == 0);
-				btrfs_clear_path_blocking(p);
+				btrfs_clear_path_blocking(p, NULL);
 
 				BUG_ON(sret > 0);
 				if (sret) {
@@ -3919,7 +3943,6 @@ find_next_key:
 				btrfs_release_path(root, path);
 				goto again;
 			} else {
-				btrfs_clear_path_blocking(path);
 				goto out;
 			}
 		}
@@ -3939,7 +3962,7 @@ find_next_key:
 		path->locks[level - 1] = 1;
 		path->nodes[level - 1] = cur;
 		unlock_up(path, level, 1);
-		btrfs_clear_path_blocking(path);
+		btrfs_clear_path_blocking(path, NULL);
 	}
 out:
 	if (ret == 0)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 3f7a8058df2b..766b31ae3186 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -43,11 +43,7 @@ struct btrfs_ordered_sum;
 
 #define BTRFS_ACL_NOT_CACHED ((void *)-1)
 
-#ifdef CONFIG_LOCKDEP
-# define BTRFS_MAX_LEVEL 7
-#else
-# define BTRFS_MAX_LEVEL 8
-#endif
+#define BTRFS_MAX_LEVEL 8
 
 /* holds pointers to all of the tree roots */
 #define BTRFS_ROOT_TREE_OBJECTID 1ULL
@@ -1715,7 +1711,8 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
 					     u64 empty_size);
 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
 					    struct btrfs_root *root,
-					    u64 bytenr, u32 blocksize);
+					    u64 bytenr, u32 blocksize,
+					    int level);
 int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root,
 		       u64 num_bytes, u64 parent, u64 min_bytes,
@@ -1835,7 +1832,6 @@ void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p);
 struct btrfs_path *btrfs_alloc_path(void);
 void btrfs_free_path(struct btrfs_path *p);
 void btrfs_set_path_blocking(struct btrfs_path *p);
-void btrfs_clear_path_blocking(struct btrfs_path *p);
 void btrfs_unlock_up_safe(struct btrfs_path *p, int level);
 
 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 5aebddd71193..adda739a0215 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -75,6 +75,40 @@ struct async_submit_bio {
 	struct btrfs_work work;
 };
 
+/* These are used to set the lockdep class on the extent buffer locks.
+ * The class is set by the readpage_end_io_hook after the buffer has
+ * passed csum validation but before the pages are unlocked.
+ *
+ * The lockdep class is also set by btrfs_init_new_buffer on freshly
+ * allocated blocks.
+ *
+ * The class is based on the level in the tree block, which allows lockdep
+ * to know that lower nodes nest inside the locks of higher nodes.
+ *
+ * We also add a check to make sure the highest level of the tree is
+ * the same as our lockdep setup here. If BTRFS_MAX_LEVEL changes, this
+ * code needs update as well.
+ */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# if BTRFS_MAX_LEVEL != 8
+#  error
+# endif
+static struct lock_class_key btrfs_eb_class[BTRFS_MAX_LEVEL + 1];
+static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = {
+	/* leaf */
+	"btrfs-extent-00",
+	"btrfs-extent-01",
+	"btrfs-extent-02",
+	"btrfs-extent-03",
+	"btrfs-extent-04",
+	"btrfs-extent-05",
+	"btrfs-extent-06",
+	"btrfs-extent-07",
+	/* highest possible level */
+	"btrfs-extent-08",
+};
+#endif
+
 /*
  * extents on the btree inode are pretty simple, there's one extent
  * that covers the entire device
@@ -347,6 +381,15 @@ static int check_tree_block_fsid(struct btrfs_root *root,
 	return ret;
 }
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level)
+{
+	lockdep_set_class_and_name(&eb->lock,
+				   &btrfs_eb_class[level],
+				   btrfs_eb_name[level]);
+}
+#endif
+
 static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 				      struct extent_state *state)
 {
@@ -392,6 +435,8 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 	}
 	found_level = btrfs_header_level(eb);
 
+	btrfs_set_buffer_lockdep_class(eb, found_level);
+
 	ret = csum_tree_block(root, eb, 1);
 	if (ret)
 		ret = -EIO;
@@ -1777,7 +1822,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	ret = find_and_setup_root(tree_root, fs_info,
 				  BTRFS_DEV_TREE_OBJECTID, dev_root);
 	dev_root->track_dirty = 1;
-
 	if (ret)
 		goto fail_extent_root;
 
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 494a56eb2986..95029db227be 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -101,4 +101,14 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root);
 int btree_lock_page_hook(struct page *page);
+
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level);
+#else
+static inline void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb,
+						  int level)
+{
+}
+#endif
 #endif
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index c59e12036e20..cd86bffbdc9f 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3416,7 +3416,8 @@ int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
 
 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
 					    struct btrfs_root *root,
-					    u64 bytenr, u32 blocksize)
+					    u64 bytenr, u32 blocksize,
+					    int level)
 {
 	struct extent_buffer *buf;
 
@@ -3424,6 +3425,7 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
 	if (!buf)
 		return ERR_PTR(-ENOMEM);
 	btrfs_set_header_generation(buf, trans->transid);
+	btrfs_set_buffer_lockdep_class(buf, level);
 	btrfs_tree_lock(buf);
 	clean_tree_block(trans, root, buf);
 
@@ -3467,7 +3469,8 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
 		return ERR_PTR(ret);
 	}
 
-	buf = btrfs_init_new_buffer(trans, root, ins.objectid, blocksize);
+	buf = btrfs_init_new_buffer(trans, root, ins.objectid,
+				    blocksize, level);
 	return buf;
 }
 
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 9ebe9385129b..85506c4a3af7 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -25,21 +25,10 @@
 #include "extent_io.h"
 #include "locking.h"
 
-/*
- * btrfs_header_level() isn't free, so don't call it when lockdep isn't
- * on
- */
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-static inline void spin_nested(struct extent_buffer *eb)
-{
-	spin_lock_nested(&eb->lock, BTRFS_MAX_LEVEL - btrfs_header_level(eb));
-}
-#else
 static inline void spin_nested(struct extent_buffer *eb)
 {
 	spin_lock(&eb->lock);
 }
-#endif
 
 /*
  * Setting a lock to blocking will drop the spinlock and set the
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index c793b6f50d8d..1316139bf9e8 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3102,6 +3102,8 @@ int btrfs_read_sys_array(struct btrfs_root *root)
 	if (!sb)
 		return -ENOMEM;
 	btrfs_set_buffer_uptodate(sb);
+	btrfs_set_buffer_lockdep_class(sb, 0);
+
 	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
 	array_size = btrfs_super_sys_array_size(super_copy);
 