path: root/drivers/md/bcache/btree.c
author    Kent Overstreet <kmo@daterainc.com>  2013-07-24 20:20:19 -0400
committer Kent Overstreet <kmo@daterainc.com>  2013-11-11 00:55:57 -0500
commit    d6fd3b11cea82346837957feab25b0be48aa424c (patch)
tree      992f1de98ba935284feac7569eea7d86dcf5bdd7 /drivers/md/bcache/btree.c
parent    8304ad4dc818ffd701c2f3e90683b5b8013f44e2 (diff)
bcache: Explicitly track btree node's parent
This is prep work for the reworked btree insertion code.

The way we set b->parent is ugly and hacky... the problem is, when
btree_split() or garbage collection splits or rewrites a btree node, the
parent changes for all its (potentially already cached) children.

I may change this later and add some code to look through the btree node
cache and find all our cached child nodes and change the parent pointer
then...

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
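[Editor's note] A minimal userspace sketch of the idea (the struct layout here is hypothetical; the real struct btree lives in btree.h): once every cached node carries a parent pointer, "is this the root?" becomes a property of the node itself rather than a comparison against the cache set's root pointer.

#include <stddef.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, heavily simplified -- stands in for bcache's struct btree. */
struct node {
	struct node *parent;	/* NULL for the root */
	int level;
};

/* Old approach: answering the question requires the tree's root pointer. */
static bool is_root_old(const struct node *n, const struct node *root)
{
	return n == root;
}

/* New approach: the node answers by itself. */
static bool is_root_new(const struct node *n)
{
	return n->parent == NULL;
}

int main(void)
{
	struct node root = { .parent = NULL, .level = 1 };
	struct node leaf = { .parent = &root, .level = 0 };

	printf("old: root=%d leaf=%d\n",
	       is_root_old(&root, &root), is_root_old(&leaf, &root));
	printf("new: root=%d leaf=%d\n",
	       is_root_new(&root), is_root_new(&leaf));
	return 0;
}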
Diffstat (limited to 'drivers/md/bcache/btree.c')
-rw-r--r--  drivers/md/bcache/btree.c  14
1 file changed, 10 insertions, 4 deletions
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index d1734d9d9c79..87299baa8a1b 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -884,6 +884,7 @@ out:
 
 	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
 	b->level = level;
+	b->parent = (void *) ~0UL;
 
 	mca_reinit(b);
 
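[Editor's note] The (void *) ~0UL added above poisons the parent pointer whenever a node is (re)initialized in the cache: as the commit message says, a cached node's real parent can change under it, so until the pointer is properly established it holds an all-ones value that is non-NULL (so the node is never mistaken for the root) and faults immediately if dereferenced. A small userspace illustration of the same pattern:

#include <stdio.h>

struct node {
	struct node *parent;
};

int main(void)
{
	struct node n;

	/* Parent not known yet: all-ones is never a valid pointer, so an
	 * accidental dereference crashes loudly instead of silently reading
	 * garbage, while !n.parent still correctly reports "not the root". */
	n.parent = (void *) ~0UL;

	printf("treated as root? %s\n", n.parent == NULL ? "yes" : "no");
	return 0;
}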
@@ -1898,7 +1899,7 @@ out:
 
 static int btree_split(struct btree *b, struct btree_op *op)
 {
-	bool split, root = b == b->c->root;
+	bool split;
 	struct btree *n1, *n2 = NULL, *n3 = NULL;
 	uint64_t start_time = local_clock();
 
@@ -1920,7 +1921,7 @@ static int btree_split(struct btree *b, struct btree_op *op)
 		if (IS_ERR(n2))
 			goto err_free1;
 
-		if (root) {
+		if (!b->parent) {
 			n3 = bch_btree_node_alloc(b->c, b->level + 1, &op->cl);
 			if (IS_ERR(n3))
 				goto err_free2;
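[Editor's note] The root test rewrite in this hunk relies on an invariant only partly visible in btree.c: the root's parent is NULL, while every other cached node's parent is either its real parent or the ~0UL poison, both non-NULL. The NULLing of the root's parent happens outside this file's diff. Under that assumption the old and new conditions agree; a sketch of the invariant as a kernel-style assertion (not part of the patch):

/* Sketch only.  Assumes:
 *   c->root->parent == NULL, and
 *   every other cached node has a non-NULL parent (real or poisoned).
 * Then "node has no parent" and "node is the root" are the same test:
 */
BUG_ON((b->parent == NULL) != (b == b->c->root));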
@@ -1928,7 +1929,8 @@ static int btree_split(struct btree *b, struct btree_op *op)
 
 		bch_btree_insert_keys(n1, op);
 
-		/* Has to be a linear search because we don't have an auxiliary
+		/*
+		 * Has to be a linear search because we don't have an auxiliary
 		 * search tree yet
 		 */
 
@@ -1960,6 +1962,8 @@ static int btree_split(struct btree *b, struct btree_op *op)
 	bch_btree_node_write(n1, &op->cl);
 
 	if (n3) {
+		/* Depth increases, make a new root */
+
 		bkey_copy_key(&n3->key, &MAX_KEY);
 		bch_btree_insert_keys(n3, op);
 		bch_btree_node_write(n3, &op->cl);
@@ -1967,7 +1971,9 @@ static int btree_split(struct btree *b, struct btree_op *op)
 		closure_sync(&op->cl);
 		bch_btree_set_root(n3);
 		rw_unlock(true, n3);
-	} else if (root) {
+	} else if (!b->parent) {
+		/* Root filled up but didn't need to be split */
+
 		op->keys.top = op->keys.bottom;
 		closure_sync(&op->cl);
 		bch_btree_set_root(n1);
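[Editor's note] Taken together, the tail of btree_split() now distinguishes three outcomes using only n3 and b->parent, where the old code needed the root flag. A condensed schematic, paraphrased from the hunks above rather than copied verbatim:

if (n3) {
	/* The root itself split: n3 becomes the new root one level up,
	 * so the tree grows one level deeper. */
	bch_btree_set_root(n3);
} else if (!b->parent) {
	/* The root filled up but didn't need to be split: n1 replaces
	 * it as the root at the same depth. */
	bch_btree_set_root(n1);
} else {
	/* An ordinary node split: the keys pointing at n1/n2 must be
	 * inserted into the parent node (handled later in the function,
	 * outside this diff). */
}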