author    Kent Overstreet <kmo@daterainc.com>  2013-07-24 20:20:19 -0400
committer Kent Overstreet <kmo@daterainc.com>  2013-11-11 00:55:57 -0500
commit    d6fd3b11cea82346837957feab25b0be48aa424c (patch)
tree      992f1de98ba935284feac7569eea7d86dcf5bdd7 /drivers/md
parent    8304ad4dc818ffd701c2f3e90683b5b8013f44e2 (diff)
bcache: Explicitly track btree node's parent
This is prep work for the reworked btree insertion code.

The way we set b->parent is ugly and hacky... the problem is, when
btree_split() or garbage collection splits or rewrites a btree node, the
parent changes for all its (potentially already cached) children.

I may change this later and add some code to look through the btree node
cache and find all our cached child nodes and change the parent pointer
then...

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
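As an aside, here is a minimal standalone sketch of the problem the message
describes (hypothetical names, not bcache code): a split frees the node that
its children cached as their parent, and nothing walks the node cache to
repair them, so the pointer can only be trusted if it is re-stamped on each
locked descent.

/*
 * Minimal standalone sketch -- hypothetical names, not bcache code.
 * A split frees the node its children cached as their parent, and
 * nothing walks the node cache to repair them; the patch's answer is
 * to re-stamp child->parent on every descent instead.
 */
#include <stdlib.h>

struct node {
        struct node *parent;            /* NULL means "I am the root" */
};

/* Splitting replaces 'old'; any cached child still holding
 * child->parent == old is left stale. */
static struct node *split(struct node *old)
{
        struct node *replacement = calloc(1, sizeof(*replacement));

        if (replacement)
                replacement->parent = old->parent;
        free(old);                      /* stale child pointers remain */
        return replacement;
}

/* The fix: refresh the pointer at the one moment it can be trusted,
 * while the parent is locked on the way down. */
static void descend(struct node *parent, struct node *child)
{
        child->parent = parent;
}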
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/bcache/btree.c  14
-rw-r--r--  drivers/md/bcache/btree.h  16
2 files changed, 20 insertions(+), 10 deletions(-)
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index d1734d9d9c79..87299baa8a1b 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -884,6 +884,7 @@ out:
 
         lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
         b->level = level;
+        b->parent = (void *) ~0UL;
 
         mca_reinit(b);
 
@@ -1898,7 +1899,7 @@ out:
 
 static int btree_split(struct btree *b, struct btree_op *op)
 {
-        bool split, root = b == b->c->root;
+        bool split;
         struct btree *n1, *n2 = NULL, *n3 = NULL;
         uint64_t start_time = local_clock();
 
@@ -1920,7 +1921,7 @@ static int btree_split(struct btree *b, struct btree_op *op)
                 if (IS_ERR(n2))
                         goto err_free1;
 
-                if (root) {
+                if (!b->parent) {
                         n3 = bch_btree_node_alloc(b->c, b->level + 1, &op->cl);
                         if (IS_ERR(n3))
                                 goto err_free2;
@@ -1928,7 +1929,8 @@ static int btree_split(struct btree *b, struct btree_op *op)
 
                 bch_btree_insert_keys(n1, op);
 
-                /* Has to be a linear search because we don't have an auxiliary
+                /*
+                 * Has to be a linear search because we don't have an auxiliary
                  * search tree yet
                  */
 
@@ -1960,6 +1962,8 @@ static int btree_split(struct btree *b, struct btree_op *op)
         bch_btree_node_write(n1, &op->cl);
 
         if (n3) {
+                /* Depth increases, make a new root */
+
                 bkey_copy_key(&n3->key, &MAX_KEY);
                 bch_btree_insert_keys(n3, op);
                 bch_btree_node_write(n3, &op->cl);
@@ -1967,7 +1971,9 @@ static int btree_split(struct btree *b, struct btree_op *op)
                 closure_sync(&op->cl);
                 bch_btree_set_root(n3);
                 rw_unlock(true, n3);
-        } else if (root) {
+        } else if (!b->parent) {
+                /* Root filled up but didn't need to be split */
+
                 op->keys.top = op->keys.bottom;
                 closure_sync(&op->cl);
                 bch_btree_set_root(n1);
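Two details in the btree.c hunks above are worth spelling out. With this
patch, a NULL parent pointer is reserved to mean "this node is the root",
which is why the b == b->c->root test becomes !b->parent. A freshly cached
node whose parent is simply not known yet therefore must not be left NULL,
so it is poisoned with (void *) ~0UL: a value that fails the root test and
faults immediately if dereferenced. A rough sketch of the idea, with
hypothetical names:

struct node {
        struct node *parent;
};

/* All-ones poison: non-NULL, so never mistaken for the root, and
 * guaranteed to fault if dereferenced before being re-stamped. */
#define PARENT_POISON   ((struct node *) ~0UL)

static void node_recycle(struct node *b)
{
        b->parent = PARENT_POISON;      /* unknown until the next descent */
}

static int node_is_root(const struct node *b)
{
        return !b->parent;              /* only valid after re-stamping */
}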
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 8a1c7e6bbbe3..6d2fb7550706 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -125,6 +125,7 @@ struct btree {
         unsigned long seq;
         struct rw_semaphore lock;
         struct cache_set *c;
+        struct btree *parent;
 
         unsigned long flags;
         uint16_t written; /* would be nice to kill */
@@ -327,12 +328,13 @@ static inline void rw_unlock(bool w, struct btree *b)
 ({                                                                      \
         int _r, l = (b)->level - 1;                                     \
         bool _w = l <= (op)->lock;                                      \
-        struct btree *_b = bch_btree_node_get((b)->c, key, l, op);      \
-        if (!IS_ERR(_b)) {                                              \
-                _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);           \
-                rw_unlock(_w, _b);                                      \
+        struct btree *_child = bch_btree_node_get((b)->c, key, l, op);  \
+        if (!IS_ERR(_child)) {                                          \
+                _child->parent = (b);                                   \
+                _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);       \
+                rw_unlock(_w, _child);                                  \
         } else                                                          \
-                _r = PTR_ERR(_b);                                       \
+                _r = PTR_ERR(_child);                                   \
         _r;                                                             \
 })
 
@@ -350,8 +352,10 @@ static inline void rw_unlock(bool w, struct btree *b)
                 bool _w = insert_lock(op, _b);                          \
                 rw_lock(_w, _b, _b->level);                             \
                 if (_b == (c)->root &&                                  \
-                    _w == insert_lock(op, _b))                          \
+                    _w == insert_lock(op, _b)) {                        \
+                        _b->parent = NULL;                              \
                         _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);   \
+                }                                                       \
                 rw_unlock(_w, _b);                                      \
                 bch_cannibalize_unlock(c, &(op)->cl);                   \
         } while (_r == -EINTR);                                         \
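Taken together, the two macro changes above are what keep the new field
coherent without ever walking the node cache: btree() re-stamps the child's
parent under the parent's lock on every descent, and btree_root() marks the
root as the one NULL-parent node at the start of every traversal. Roughly,
with hypothetical names:

struct node {
        struct node *parent;
};

static int do_op(struct node *b)
{
        (void) b;                       /* per-node work elided */
        return 0;
}

/* Corresponds to btree(): every recursive descent re-stamps the
 * child's parent before doing work on the child. */
static int visit_child(struct node *parent, struct node *child)
{
        child->parent = parent;
        return do_op(child);
}

/* Corresponds to btree_root(): each traversal starts by marking the
 * root as the one node whose parent is NULL. */
static int visit_root(struct node *root)
{
        root->parent = NULL;
        return do_op(root);
}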