aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md
diff options
context:
space:
mode:
authorKent Overstreet <kmo@daterainc.com>2013-07-24 20:22:44 -0400
committerKent Overstreet <kmo@daterainc.com>2013-11-11 00:55:58 -0500
commit403b6cdeb1a38d896ffcb1f99ddcfd4e343b5d69 (patch)
tree2fa7640261ef7cfb861c1df470a86d8cef420b11 /drivers/md
parent26c949f8062cb9221a28b2228104f1cc5b265097 (diff)
bcache: Insert multiple keys at a time
We'll often end up with a list of adjacent keys to insert - because bch_data_insert() may have to fragment the data it writes. Originally, to simplify things and avoid having to deal with corner cases bch_btree_insert() would pass keys from this list one at a time to btree_insert_recurse() - mainly because the list of keys might span leaf nodes, so it was easier this way. With the btree_insert_node() refactoring, it's now a lot easier to just pass down the whole list and have btree_insert_recurse() iterate over leaf nodes until it's done. Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Diffstat (limited to 'drivers/md')
-rw-r--r--drivers/md/bcache/btree.c33
1 file changed, 16 insertions(+), 17 deletions(-)
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index c2722e075f18..60d06465d772 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1858,10 +1858,14 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
 	BUG_ON(!insert_lock(op, b));
 
 	while (!bch_keylist_empty(insert_keys)) {
+		struct bset *i = write_block(b);
 		struct bkey *k = insert_keys->bottom;
 
-		if (b->level ||
-		    bkey_cmp(k, &b->key) <= 0) {
+		if (b->written + __set_blocks(i, i->keys + bkey_u64s(k), b->c)
+		    > btree_blocks(b))
+			break;
+
+		if (bkey_cmp(k, &b->key) <= 0) {
 			bkey_put(b->c, k, b->level);
 
 			ret |= btree_insert_key(b, op, k);
@@ -1888,6 +1892,8 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
 		}
 	}
 
+	BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
+
 	BUG_ON(bch_count_data(b) < oldsize);
 	return ret;
 }
@@ -2073,6 +2079,8 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
 					   &split_keys);
 			insert_keys = &split_keys;
 			b = parent;
+			if (!ret)
+				ret = -EINTR;
 		}
 	} else {
 		BUG_ON(write_block(b) != b->sets[b->nsets].data);
@@ -2091,6 +2099,9 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
 
 static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op)
 {
+	if (bch_keylist_empty(&op->keys))
+		return 0;
+
 	if (b->level) {
 		struct bkey *insert = op->keys.bottom;
 		struct bkey *k = bch_next_recurse_key(b, &START_KEY(insert));
@@ -2112,7 +2123,6 @@ static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op)
 int bch_btree_insert(struct btree_op *op, struct cache_set *c)
 {
 	int ret = 0;
-	struct keylist stack_keys;
 
 	/*
 	 * Don't want to block with the btree locked unless we have to,
@@ -2121,17 +2131,9 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c)
 	clear_closure_blocking(&op->cl);
 
 	BUG_ON(bch_keylist_empty(&op->keys));
-	bch_keylist_copy(&stack_keys, &op->keys);
-	bch_keylist_init(&op->keys);
-
-	while (!bch_keylist_empty(&stack_keys) ||
-	       !bch_keylist_empty(&op->keys)) {
-		if (bch_keylist_empty(&op->keys)) {
-			bch_keylist_add(&op->keys,
-					bch_keylist_pop(&stack_keys));
-			op->lock = 0;
-		}
 
+	while (!bch_keylist_empty(&op->keys)) {
+		op->lock = 0;
 		ret = btree_root(insert_recurse, c, op);
 
 		if (ret == -EAGAIN) {
@@ -2143,14 +2145,11 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c)
 			pr_err("error %i trying to insert key for %s",
 			       ret, op_type(op));
 
-			while ((k = bch_keylist_pop(&stack_keys) ?:
-				    bch_keylist_pop(&op->keys)))
+			while ((k = bch_keylist_pop(&op->keys)))
 				bkey_put(c, k, 0);
 		}
 	}
 
-	bch_keylist_free(&stack_keys);
-
 	if (op->journal)
 		atomic_dec_bug(op->journal);
 	op->journal = NULL;