Diffstat (limited to 'drivers/md/bcache/btree.h')
-rw-r--r--  drivers/md/bcache/btree.h | 195
1 file changed, 65 insertions, 130 deletions
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 3333d3723633..767e75570896 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -125,6 +125,7 @@ struct btree {
 	unsigned long seq;
 	struct rw_semaphore lock;
 	struct cache_set *c;
+	struct btree *parent;
 
 	unsigned long flags;
 	uint16_t written;	/* would be nice to kill */
@@ -200,12 +201,7 @@ static inline bool bkey_written(struct btree *b, struct bkey *k)
 
 static inline void set_gc_sectors(struct cache_set *c)
 {
-	atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 8);
-}
-
-static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
-{
-	return __bch_ptr_invalid(b->c, b->level, k);
+	atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
 }
 
 static inline struct bkey *bch_btree_iter_init(struct btree *b,
@@ -215,6 +211,16 @@ static inline struct bkey *bch_btree_iter_init(struct btree *b,
 	return __bch_btree_iter_init(b, iter, search, b->sets);
 }
 
+static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
+{
+	if (b->level)
+		return bch_btree_ptr_invalid(b->c, k);
+	else
+		return bch_extent_ptr_invalid(b->c, k);
+}
+
+void bkey_put(struct cache_set *c, struct bkey *k);
+
 /* Looping macros */
 
 #define for_each_cached_btree(b, c, iter)			\
@@ -234,51 +240,17 @@ static inline struct bkey *bch_btree_iter_init(struct btree *b,
 /* Recursing down the btree */
 
 struct btree_op {
-	struct closure cl;
-	struct cache_set *c;
-
-	/* Journal entry we have a refcount on */
-	atomic_t *journal;
-
-	/* Bio to be inserted into the cache */
-	struct bio *cache_bio;
-
-	unsigned inode;
-
-	uint16_t write_prio;
-
 	/* Btree level at which we start taking write locks */
 	short lock;
 
-	/* Btree insertion type */
-	enum {
-		BTREE_INSERT,
-		BTREE_REPLACE
-	} type:8;
-
-	unsigned csum:1;
-	unsigned skip:1;
-	unsigned flush_journal:1;
-
-	unsigned insert_data_done:1;
-	unsigned lookup_done:1;
 	unsigned insert_collision:1;
-
-	/* Anything after this point won't get zeroed in do_bio_hook() */
-
-	/* Keys to be inserted */
-	struct keylist keys;
-	BKEY_PADDED(replace);
 };
 
-enum {
-	BTREE_INSERT_STATUS_INSERT,
-	BTREE_INSERT_STATUS_BACK_MERGE,
-	BTREE_INSERT_STATUS_OVERWROTE,
-	BTREE_INSERT_STATUS_FRONT_MERGE,
-};
-
-void bch_btree_op_init_stack(struct btree_op *);
+static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
+{
+	memset(op, 0, sizeof(struct btree_op));
+	op->lock = write_lock_level;
+}
 
 static inline void rw_lock(bool w, struct btree *b, int level)
 {
@@ -290,108 +262,71 @@ static inline void rw_lock(bool w, struct btree *b, int level)
 
 static inline void rw_unlock(bool w, struct btree *b)
 {
-#ifdef CONFIG_BCACHE_EDEBUG
-	unsigned i;
-
-	if (w && b->key.ptr[0])
-		for (i = 0; i <= b->nsets; i++)
-			bch_check_key_order(b, b->sets[i].data);
-#endif
-
 	if (w)
 		b->seq++;
 	(w ? up_write : up_read)(&b->lock);
 }
 
-#define insert_lock(s, b)	((b)->level <= (s)->lock)
+void bch_btree_node_read(struct btree *);
+void bch_btree_node_write(struct btree *, struct closure *);
 
-/*
- * These macros are for recursing down the btree - they handle the details of
- * locking and looking up nodes in the cache for you. They're best treated as
- * mere syntax when reading code that uses them.
- *
- * op->lock determines whether we take a read or a write lock at a given depth.
- * If you've got a read lock and find that you need a write lock (i.e. you're
- * going to have to split), set op->lock and return -EINTR; btree_root() will
- * call you again and you'll have the correct lock.
- */
+void bch_btree_set_root(struct btree *);
+struct btree *bch_btree_node_alloc(struct cache_set *, int, bool);
+struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, int, bool);
 
-/**
- * btree - recurse down the btree on a specified key
- * @fn: function to call, which will be passed the child node
- * @key: key to recurse on
- * @b: parent btree node
- * @op: pointer to struct btree_op
- */
-#define btree(fn, key, b, op, ...)				\
-({								\
-	int _r, l = (b)->level - 1;				\
-	bool _w = l <= (op)->lock;				\
-	struct btree *_b = bch_btree_node_get((b)->c, key, l, op); \
-	if (!IS_ERR(_b)) {					\
-		_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
-		rw_unlock(_w, _b);				\
-	} else							\
-		_r = PTR_ERR(_b);				\
-	_r;							\
-})
-
-/**
- * btree_root - call a function on the root of the btree
- * @fn: function to call, which will be passed the child node
- * @c: cache set
- * @op: pointer to struct btree_op
- */
-#define btree_root(fn, c, op, ...)				\
-({								\
-	int _r = -EINTR;					\
-	do {							\
-		struct btree *_b = (c)->root;			\
-		bool _w = insert_lock(op, _b);			\
-		rw_lock(_w, _b, _b->level);			\
-		if (_b == (c)->root &&				\
-		    _w == insert_lock(op, _b))			\
-			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
-		rw_unlock(_w, _b);				\
-		bch_cannibalize_unlock(c, &(op)->cl);		\
-	} while (_r == -EINTR);					\
-								\
-	_r;							\
-})
+int bch_btree_insert_check_key(struct btree *, struct btree_op *,
+			       struct bkey *);
+int bch_btree_insert(struct cache_set *, struct keylist *,
+		     atomic_t *, struct bkey *);
+
+int bch_gc_thread_start(struct cache_set *);
+size_t bch_btree_gc_finish(struct cache_set *);
+void bch_moving_gc(struct cache_set *);
+int bch_btree_check(struct cache_set *);
+uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);
 
-static inline bool should_split(struct btree *b)
+static inline void wake_up_gc(struct cache_set *c)
 {
-	struct bset *i = write_block(b);
-	return b->written >= btree_blocks(b) ||
-	       (i->seq == b->sets[0].data->seq &&
-		b->written + __set_blocks(i, i->keys + 15, b->c)
-		> btree_blocks(b));
+	if (c->gc_thread)
+		wake_up_process(c->gc_thread);
 }
 
-void bch_btree_node_read(struct btree *);
-void bch_btree_node_write(struct btree *, struct closure *);
+#define MAP_DONE	0
+#define MAP_CONTINUE	1
 
-void bch_cannibalize_unlock(struct cache_set *, struct closure *);
-void bch_btree_set_root(struct btree *);
-struct btree *bch_btree_node_alloc(struct cache_set *, int, struct closure *);
-struct btree *bch_btree_node_get(struct cache_set *, struct bkey *,
-				 int, struct btree_op *);
+#define MAP_ALL_NODES	0
+#define MAP_LEAF_NODES	1
 
-bool bch_btree_insert_check_key(struct btree *, struct btree_op *,
-				struct bio *);
-int bch_btree_insert(struct btree_op *, struct cache_set *);
+#define MAP_END_KEY	1
 
-int bch_btree_search_recurse(struct btree *, struct btree_op *);
+typedef int (btree_map_nodes_fn)(struct btree_op *, struct btree *);
+int __bch_btree_map_nodes(struct btree_op *, struct cache_set *,
+			  struct bkey *, btree_map_nodes_fn *, int);
 
-void bch_queue_gc(struct cache_set *);
-size_t bch_btree_gc_finish(struct cache_set *);
-void bch_moving_gc(struct closure *);
-int bch_btree_check(struct cache_set *, struct btree_op *);
-uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);
+static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
+				      struct bkey *from, btree_map_nodes_fn *fn)
+{
+	return __bch_btree_map_nodes(op, c, from, fn, MAP_ALL_NODES);
+}
+
+static inline int bch_btree_map_leaf_nodes(struct btree_op *op,
+					   struct cache_set *c,
+					   struct bkey *from,
+					   btree_map_nodes_fn *fn)
+{
+	return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES);
+}
+
+typedef int (btree_map_keys_fn)(struct btree_op *, struct btree *,
+				struct bkey *);
+int bch_btree_map_keys(struct btree_op *, struct cache_set *,
+		       struct bkey *, btree_map_keys_fn *, int);
+
+typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
 
 void bch_keybuf_init(struct keybuf *);
-void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *,
-		       keybuf_pred_fn *);
+void bch_refill_keybuf(struct cache_set *, struct keybuf *,
+		       struct bkey *, keybuf_pred_fn *);
 bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
				  struct bkey *);
 void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
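
The diff above replaces the old fat struct btree_op (embedded closure, keylist, insert state) and the btree()/btree_root() recursion macros with a minimal op plus callback-driven mappers. The sketch below is not part of the commit; it is only an illustration of how a caller might use the declarations kept in this header (bch_btree_op_init(), bch_btree_map_nodes(), MAP_CONTINUE/MAP_DONE). The struct, function names and the chosen lock level and start key are invented assumptions for the example, not code from the tree.

/*
 * Hypothetical example, not from this commit: count btree nodes by
 * embedding the slimmed-down btree_op in a caller-owned context struct
 * and walking the tree with bch_btree_map_nodes().
 */
#include "bcache.h"	/* cache_set, ZERO_KEY, container_of() via kernel headers */
#include "btree.h"

struct node_count_op {
	struct btree_op op;	/* embedded op, recovered via container_of() */
	unsigned	nodes;
};

static int count_nodes_fn(struct btree_op *b_op, struct btree *b)
{
	struct node_count_op *op = container_of(b_op, struct node_count_op, op);

	op->nodes++;
	return MAP_CONTINUE;	/* returning MAP_DONE would stop the walk early */
}

static unsigned count_btree_nodes(struct cache_set *c)
{
	struct node_count_op op = { .nodes = 0 };

	/*
	 * Lock level -1 (an assumption here): per the "level at which we
	 * start taking write locks" comment, no node has level <= -1, so
	 * the traversal only ever takes read locks.
	 */
	bch_btree_op_init(&op.op, -1);

	/* &ZERO_KEY (from bcache.h) starts the walk at the smallest key. */
	bch_btree_map_nodes(&op.op, c, &ZERO_KEY, count_nodes_fn);
	return op.nodes;
}

The embed-and-container_of shape is the apparent intent of trimming struct btree_op down to the lock level and collision flag: each caller carries its own state alongside the op instead of stuffing it into one shared structure.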