author		Kent Overstreet <kmo@daterainc.com>	2013-07-24 20:37:59 -0400
committer	Kent Overstreet <kmo@daterainc.com>	2013-11-11 00:56:07 -0500
commit		df8e89701fb02cba6e09c5f46f002778b5b52dd2 (patch)
tree		65092f8d1686620ba8cbf031e95bb575b585730b /drivers/md/bcache/btree.c
parent		48dad8baf92fe8967d9e1358af1cfdda1d2d3298 (diff)
bcache: Move some stuff to btree.c
With the new btree_map() functions, we don't need to export the stuff
needed for traversing the btree anymore.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Diffstat (limited to 'drivers/md/bcache/btree.c')
-rw-r--r--	drivers/md/bcache/btree.c	95
1 file changed, 93 insertions, 2 deletions
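With bch_btree_search_recurse() and bch_cannibalize_unlock() made static by this patch, code outside btree.c reaches the search path only through the bch_btree_search_async() closure entry point added in the diff below. A rough usage sketch follows (illustrative only, not part of this patch; example_start_lookup() is a made-up caller, while continue_at(), bcache_wq and the struct search / struct btree_op layout are the ones visible in the diff):

	/* Illustrative caller, not from this commit. */
	static void example_start_lookup(struct search *s)
	{
		/* All btree recursion now stays inside btree.c; the caller
		 * just hands the embedded closure to the async entry point,
		 * which re-queues itself if the root search returns -EAGAIN. */
		continue_at(&s->op.cl, bch_btree_search_async, bcache_wq);
	}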
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index cfbdcf349b7f..bfd60e6a2312 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -99,6 +99,13 @@ static const char *op_type(struct btree_op *op)
 	return op_types[op->type];
 }
 
+enum {
+	BTREE_INSERT_STATUS_INSERT,
+	BTREE_INSERT_STATUS_BACK_MERGE,
+	BTREE_INSERT_STATUS_OVERWROTE,
+	BTREE_INSERT_STATUS_FRONT_MERGE,
+};
+
 #define MAX_NEED_GC		64
 #define MAX_SAVE_PRIO		72
 
@@ -116,6 +123,78 @@ void bch_btree_op_init_stack(struct btree_op *op)
 	op->lock = -1;
 }
 
+static inline bool should_split(struct btree *b)
+{
+	struct bset *i = write_block(b);
+	return b->written >= btree_blocks(b) ||
+		(b->written + __set_blocks(i, i->keys + 15, b->c)
+		 > btree_blocks(b));
+}
+
+#define insert_lock(s, b)	((b)->level <= (s)->lock)
+
+/*
+ * These macros are for recursing down the btree - they handle the details of
+ * locking and looking up nodes in the cache for you. They're best treated as
+ * mere syntax when reading code that uses them.
+ *
+ * op->lock determines whether we take a read or a write lock at a given depth.
+ * If you've got a read lock and find that you need a write lock (i.e. you're
+ * going to have to split), set op->lock and return -EINTR; btree_root() will
+ * call you again and you'll have the correct lock.
+ */
+
+/**
+ * btree - recurse down the btree on a specified key
+ * @fn:		function to call, which will be passed the child node
+ * @key:	key to recurse on
+ * @b:		parent btree node
+ * @op:		pointer to struct btree_op
+ */
+#define btree(fn, key, b, op, ...)					\
+({									\
+	int _r, l = (b)->level - 1;					\
+	bool _w = l <= (op)->lock;					\
+	struct btree *_child = bch_btree_node_get((b)->c, key, l, _w);	\
+	if (!IS_ERR(_child)) {						\
+		_child->parent = (b);					\
+		_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);	\
+		rw_unlock(_w, _child);					\
+	} else								\
+		_r = PTR_ERR(_child);					\
+	_r;								\
+})
+
+/**
+ * btree_root - call a function on the root of the btree
+ * @fn:		function to call, which will be passed the child node
+ * @c:		cache set
+ * @op:		pointer to struct btree_op
+ */
+#define btree_root(fn, c, op, ...)					\
+({									\
+	int _r = -EINTR;						\
+	do {								\
+		struct btree *_b = (c)->root;				\
+		bool _w = insert_lock(op, _b);				\
+		rw_lock(_w, _b, _b->level);				\
+		if (_b == (c)->root &&					\
+		    _w == insert_lock(op, _b)) {			\
+			_b->parent = NULL;				\
+			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
+		}							\
+		rw_unlock(_w, _b);					\
+		bch_cannibalize_unlock(c);				\
+		if (_r == -ENOSPC) {					\
+			wait_event((c)->try_wait,			\
+				   !(c)->try_harder);			\
+			_r = -EINTR;					\
+		}							\
+	} while (_r == -EINTR);						\
+									\
+	_r;								\
+})
+
 /* Btree key manipulation */
 
 void __bkey_put(struct cache_set *c, struct bkey *k)
@@ -811,7 +890,7 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k)
  * cannibalize_bucket() will take. This means every time we unlock the root of
  * the btree, we need to release this lock if we have it held.
  */
-void bch_cannibalize_unlock(struct cache_set *c)
+static void bch_cannibalize_unlock(struct cache_set *c)
 {
 	if (c->try_harder == current) {
 		bch_time_stats_update(&c->try_harder_time, c->try_harder_start);
@@ -2262,7 +2341,7 @@ static int submit_partial_cache_hit(struct btree *b, struct btree_op *op,
 	return 0;
 }
 
-int bch_btree_search_recurse(struct btree *b, struct btree_op *op)
+static int bch_btree_search_recurse(struct btree *b, struct btree_op *op)
 {
 	struct search *s = container_of(op, struct search, op);
 	struct bio *bio = &s->bio.bio;
@@ -2296,6 +2375,18 @@ int bch_btree_search_recurse(struct btree *b, struct btree_op *op)
 	return ret;
 }
 
+void bch_btree_search_async(struct closure *cl)
+{
+	struct btree_op *op = container_of(cl, struct btree_op, cl);
+
+	int ret = btree_root(search_recurse, op->c, op);
+
+	if (ret == -EAGAIN)
+		continue_at(cl, bch_btree_search_async, bcache_wq);
+
+	closure_return(cl);
+}
+
 /* Map across nodes or keys */
 
 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
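The comment block this patch moves into btree.c spells out the btree()/btree_root() calling convention: write locks are governed by op->lock, and a function that discovers it needs a write lock sets op->lock and returns -EINTR so btree_root() restarts the traversal. A minimal sketch of a function written against that convention, hypothetical and not from bcache (bch_btree_example_recurse, do_leaf_work(), needs_write_lock() and search_key are stand-ins):

	/* Hypothetical example of the convention documented above; the leaf
	 * and interior helpers and the key argument are stand-ins, not real
	 * bcache symbols. */
	static int bch_btree_example_recurse(struct btree *b, struct btree_op *op,
					     struct bkey *search_key)
	{
		if (!b->level)
			return do_leaf_work(b, op, search_key);

		/* Only holding a read lock, but a write lock is needed at this
		 * depth (say, a split is coming): record the level in op->lock
		 * and return -EINTR so btree_root() retries with write locks. */
		if (!insert_lock(op, b) && needs_write_lock(b)) {
			op->lock = b->level;
			return -EINTR;
		}

		/* Recurse into the child covering search_key: btree() locks the
		 * child, calls bch_btree_example_recurse() on it, then unlocks. */
		return btree(example_recurse, search_key, b, op, search_key);
	}

	/* Driven from the top as: btree_root(example_recurse, c, op, &start_key); */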