author    Kent Overstreet <kmo@daterainc.com>    2013-07-24 20:37:59 -0400
committer Kent Overstreet <kmo@daterainc.com>    2013-11-11 00:56:07 -0500
commit    df8e89701fb02cba6e09c5f46f002778b5b52dd2 (patch)
tree      65092f8d1686620ba8cbf031e95bb575b585730b /drivers/md
parent    48dad8baf92fe8967d9e1358af1cfdda1d2d3298 (diff)
bcache: Move some stuff to btree.c
With the new btree_map() functions, we don't need to export the stuff
needed for traversing the btree anymore.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
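For reference, the caller-side pattern after this change, condensed from the
request.c hunk below (cached_dev_read() and the closure plumbing are unchanged
by this patch; only the async entry point it calls is renamed and moved into
btree.c):

static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
        struct closure *cl = &s->cl;

        /*
         * bch_btree_search_async() wraps btree_root(search_recurse, ...),
         * which is now private to btree.c; it requeues itself on -EAGAIN
         * and finishes with closure_return().
         */
        closure_call(&s->op.cl, bch_btree_search_async, NULL, cl);
        continue_at(cl, cached_dev_read_done_bh, NULL);
}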
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/bcache/btree.c    95
-rw-r--r--  drivers/md/bcache/btree.h    82
-rw-r--r--  drivers/md/bcache/request.c  16
3 files changed, 96 insertions(+), 97 deletions(-)
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index cfbdcf349b7f..bfd60e6a2312 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -99,6 +99,13 @@ static const char *op_type(struct btree_op *op)
         return op_types[op->type];
 }
 
+enum {
+        BTREE_INSERT_STATUS_INSERT,
+        BTREE_INSERT_STATUS_BACK_MERGE,
+        BTREE_INSERT_STATUS_OVERWROTE,
+        BTREE_INSERT_STATUS_FRONT_MERGE,
+};
+
 #define MAX_NEED_GC     64
 #define MAX_SAVE_PRIO   72
 
@@ -116,6 +123,78 @@ void bch_btree_op_init_stack(struct btree_op *op)
         op->lock = -1;
 }
 
+static inline bool should_split(struct btree *b)
+{
+        struct bset *i = write_block(b);
+        return b->written >= btree_blocks(b) ||
+               (b->written + __set_blocks(i, i->keys + 15, b->c)
+                > btree_blocks(b));
+}
+
+#define insert_lock(s, b) ((b)->level <= (s)->lock)
+
+/*
+ * These macros are for recursing down the btree - they handle the details of
+ * locking and looking up nodes in the cache for you. They're best treated as
+ * mere syntax when reading code that uses them.
+ *
+ * op->lock determines whether we take a read or a write lock at a given depth.
+ * If you've got a read lock and find that you need a write lock (i.e. you're
+ * going to have to split), set op->lock and return -EINTR; btree_root() will
+ * call you again and you'll have the correct lock.
+ */
+
+/**
+ * btree - recurse down the btree on a specified key
+ * @fn: function to call, which will be passed the child node
+ * @key: key to recurse on
+ * @b: parent btree node
+ * @op: pointer to struct btree_op
+ */
+#define btree(fn, key, b, op, ...) \
+({ \
+        int _r, l = (b)->level - 1; \
+        bool _w = l <= (op)->lock; \
+        struct btree *_child = bch_btree_node_get((b)->c, key, l, _w); \
+        if (!IS_ERR(_child)) { \
+                _child->parent = (b); \
+                _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \
+                rw_unlock(_w, _child); \
+        } else \
+                _r = PTR_ERR(_child); \
+        _r; \
+})
+
+/**
+ * btree_root - call a function on the root of the btree
+ * @fn: function to call, which will be passed the child node
+ * @c: cache set
+ * @op: pointer to struct btree_op
+ */
+#define btree_root(fn, c, op, ...) \
+({ \
+        int _r = -EINTR; \
+        do { \
+                struct btree *_b = (c)->root; \
+                bool _w = insert_lock(op, _b); \
+                rw_lock(_w, _b, _b->level); \
+                if (_b == (c)->root && \
+                    _w == insert_lock(op, _b)) { \
+                        _b->parent = NULL; \
+                        _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
+                } \
+                rw_unlock(_w, _b); \
+                bch_cannibalize_unlock(c); \
+                if (_r == -ENOSPC) { \
+                        wait_event((c)->try_wait, \
+                                   !(c)->try_harder); \
+                        _r = -EINTR; \
+                } \
+        } while (_r == -EINTR); \
+ \
+        _r; \
+})
+
 /* Btree key manipulation */
 
 void __bkey_put(struct cache_set *c, struct bkey *k)
@@ -811,7 +890,7 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k)
  * cannibalize_bucket() will take. This means every time we unlock the root of
  * the btree, we need to release this lock if we have it held.
  */
-void bch_cannibalize_unlock(struct cache_set *c)
+static void bch_cannibalize_unlock(struct cache_set *c)
 {
         if (c->try_harder == current) {
                 bch_time_stats_update(&c->try_harder_time, c->try_harder_start);
@@ -2262,7 +2341,7 @@ static int submit_partial_cache_hit(struct btree *b, struct btree_op *op,
         return 0;
 }
 
-int bch_btree_search_recurse(struct btree *b, struct btree_op *op)
+static int bch_btree_search_recurse(struct btree *b, struct btree_op *op)
 {
         struct search *s = container_of(op, struct search, op);
         struct bio *bio = &s->bio.bio;
@@ -2296,6 +2375,18 @@ int bch_btree_search_recurse(struct btree *b, struct btree_op *op)
         return ret;
 }
 
+void bch_btree_search_async(struct closure *cl)
+{
+        struct btree_op *op = container_of(cl, struct btree_op, cl);
+
+        int ret = btree_root(search_recurse, op->c, op);
+
+        if (ret == -EAGAIN)
+                continue_at(cl, bch_btree_search_async, bcache_wq);
+
+        closure_return(cl);
+}
+
 /* Map across nodes or keys */
 
 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index cafdeb01e219..1690f4731c1e 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -270,13 +270,6 @@ struct btree_op {
         BKEY_PADDED(replace);
 };
 
-enum {
-        BTREE_INSERT_STATUS_INSERT,
-        BTREE_INSERT_STATUS_BACK_MERGE,
-        BTREE_INSERT_STATUS_OVERWROTE,
-        BTREE_INSERT_STATUS_FRONT_MERGE,
-};
-
 void bch_btree_op_init_stack(struct btree_op *);
 
 static inline void rw_lock(bool w, struct btree *b, int level)
@@ -302,82 +295,9 @@ static inline void rw_unlock(bool w, struct btree *b)
         (w ? up_write : up_read)(&b->lock);
 }
 
-#define insert_lock(s, b) ((b)->level <= (s)->lock)
-
-/*
- * These macros are for recursing down the btree - they handle the details of
- * locking and looking up nodes in the cache for you. They're best treated as
- * mere syntax when reading code that uses them.
- *
- * op->lock determines whether we take a read or a write lock at a given depth.
- * If you've got a read lock and find that you need a write lock (i.e. you're
- * going to have to split), set op->lock and return -EINTR; btree_root() will
- * call you again and you'll have the correct lock.
- */
-
-/**
- * btree - recurse down the btree on a specified key
- * @fn: function to call, which will be passed the child node
- * @key: key to recurse on
- * @b: parent btree node
- * @op: pointer to struct btree_op
- */
-#define btree(fn, key, b, op, ...) \
-({ \
-        int _r, l = (b)->level - 1; \
-        bool _w = l <= (op)->lock; \
-        struct btree *_child = bch_btree_node_get((b)->c, key, l, _w); \
-        if (!IS_ERR(_child)) { \
-                _child->parent = (b); \
-                _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \
-                rw_unlock(_w, _child); \
-        } else \
-                _r = PTR_ERR(_child); \
-        _r; \
-})
-
-/**
- * btree_root - call a function on the root of the btree
- * @fn: function to call, which will be passed the child node
- * @c: cache set
- * @op: pointer to struct btree_op
- */
-#define btree_root(fn, c, op, ...) \
-({ \
-        int _r = -EINTR; \
-        do { \
-                struct btree *_b = (c)->root; \
-                bool _w = insert_lock(op, _b); \
-                rw_lock(_w, _b, _b->level); \
-                if (_b == (c)->root && \
-                    _w == insert_lock(op, _b)) { \
-                        _b->parent = NULL; \
-                        _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
-                } \
-                rw_unlock(_w, _b); \
-                bch_cannibalize_unlock(c); \
-                if (_r == -ENOSPC) { \
-                        wait_event((c)->try_wait, \
-                                   !(c)->try_harder); \
-                        _r = -EINTR; \
-                } \
-        } while (_r == -EINTR); \
- \
-        _r; \
-})
-
-static inline bool should_split(struct btree *b)
-{
-        struct bset *i = write_block(b);
-        return b->written >= btree_blocks(b) ||
-                (b->written + __set_blocks(i, i->keys + 15, b->c)
-                 > btree_blocks(b));
-}
-
 void bch_btree_node_read(struct btree *);
 void bch_btree_node_write(struct btree *, struct closure *);
 
-void bch_cannibalize_unlock(struct cache_set *);
 void bch_btree_set_root(struct btree *);
 struct btree *bch_btree_node_alloc(struct cache_set *, int);
 struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, int, bool);
@@ -386,7 +306,7 @@ int bch_btree_insert_check_key(struct btree *, struct btree_op *,
                                struct bkey *);
 int bch_btree_insert(struct btree_op *, struct cache_set *, struct keylist *);
 
-int bch_btree_search_recurse(struct btree *, struct btree_op *);
+void bch_btree_search_async(struct closure *);
 
 int bch_gc_thread_start(struct cache_set *);
 size_t bch_btree_gc_finish(struct cache_set *);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index f779eb420d69..a16872541038 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -754,18 +754,6 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
         return s;
 }
 
-static void btree_read_async(struct closure *cl)
-{
-        struct btree_op *op = container_of(cl, struct btree_op, cl);
-
-        int ret = btree_root(search_recurse, op->c, op);
-
-        if (ret == -EAGAIN)
-                continue_at(cl, btree_read_async, bcache_wq);
-
-        closure_return(cl);
-}
-
 /* Cached devices */
 
 static void cached_dev_bio_complete(struct closure *cl)
@@ -1087,7 +1075,7 @@ static void cached_dev_read(struct cached_dev *dc, struct search *s)
 {
         struct closure *cl = &s->cl;
 
-        closure_call(&s->op.cl, btree_read_async, NULL, cl);
+        closure_call(&s->op.cl, bch_btree_search_async, NULL, cl);
         continue_at(cl, cached_dev_read_done_bh, NULL);
 }
 
@@ -1351,7 +1339,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 
                 closure_call(&s->op.cl, bch_data_insert, NULL, cl);
         } else {
-                closure_call(&s->op.cl, btree_read_async, NULL, cl);
+                closure_call(&s->op.cl, bch_btree_search_async, NULL, cl);
         }
 
         continue_at(cl, search_free, NULL);