diff options
author | Kent Overstreet <koverstreet@google.com> | 2013-03-28 14:50:55 -0400 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2013-03-28 14:50:55 -0400 |
commit | 169ef1cf6171d35550fef85645b83b960e241cff (patch) | |
tree | 61451e5477e992d5bdee0fc906f9db018afc1239 /drivers/md/bcache/btree.c | |
parent | 0b6ef4164f50698eee536903d69d086add1a7889 (diff) |
bcache: Don't export utility code, prefix with bch_
Signed-off-by: Kent Overstreet <koverstreet@google.com>
Cc: linux-bcache@vger.kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers/md/bcache/btree.c')
-rw-r--r-- | drivers/md/bcache/btree.c | 18 |
1 file changed, 9 insertions, 9 deletions
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 24b678059091..f2b2c653c5a5 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c | |||
@@ -129,7 +129,7 @@ static uint64_t btree_csum_set(struct btree *b, struct bset *i) | |||
129 | uint64_t crc = b->key.ptr[0]; | 129 | uint64_t crc = b->key.ptr[0]; |
130 | void *data = (void *) i + 8, *end = end(i); | 130 | void *data = (void *) i + 8, *end = end(i); |
131 | 131 | ||
132 | crc = crc64_update(crc, data, end - data); | 132 | crc = bch_crc64_update(crc, data, end - data); |
133 | return crc ^ 0xffffffffffffffff; | 133 | return crc ^ 0xffffffffffffffff; |
134 | } | 134 | } |
135 | 135 | ||
@@ -231,7 +231,7 @@ out: | |||
231 | mutex_unlock(&b->c->fill_lock); | 231 | mutex_unlock(&b->c->fill_lock); |
232 | 232 | ||
233 | spin_lock(&b->c->btree_read_time_lock); | 233 | spin_lock(&b->c->btree_read_time_lock); |
234 | time_stats_update(&b->c->btree_read_time, b->io_start_time); | 234 | bch_time_stats_update(&b->c->btree_read_time, b->io_start_time); |
235 | spin_unlock(&b->c->btree_read_time_lock); | 235 | spin_unlock(&b->c->btree_read_time_lock); |
236 | 236 | ||
237 | smp_wmb(); /* read_done is our write lock */ | 237 | smp_wmb(); /* read_done is our write lock */ |
@@ -259,7 +259,7 @@ void bch_btree_read(struct btree *b) | |||
259 | b->bio->bi_rw = REQ_META|READ_SYNC; | 259 | b->bio->bi_rw = REQ_META|READ_SYNC; |
260 | b->bio->bi_size = KEY_SIZE(&b->key) << 9; | 260 | b->bio->bi_size = KEY_SIZE(&b->key) << 9; |
261 | 261 | ||
262 | bio_map(b->bio, b->sets[0].data); | 262 | bch_bio_map(b->bio, b->sets[0].data); |
263 | 263 | ||
264 | pr_debug("%s", pbtree(b)); | 264 | pr_debug("%s", pbtree(b)); |
265 | trace_bcache_btree_read(b->bio); | 265 | trace_bcache_btree_read(b->bio); |
@@ -327,12 +327,12 @@ static void do_btree_write(struct btree *b) | |||
327 | btree_bio_init(b); | 327 | btree_bio_init(b); |
328 | b->bio->bi_rw = REQ_META|WRITE_SYNC; | 328 | b->bio->bi_rw = REQ_META|WRITE_SYNC; |
329 | b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c); | 329 | b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c); |
330 | bio_map(b->bio, i); | 330 | bch_bio_map(b->bio, i); |
331 | 331 | ||
332 | bkey_copy(&k.key, &b->key); | 332 | bkey_copy(&k.key, &b->key); |
333 | SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i)); | 333 | SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i)); |
334 | 334 | ||
335 | if (!bio_alloc_pages(b->bio, GFP_NOIO)) { | 335 | if (!bch_bio_alloc_pages(b->bio, GFP_NOIO)) { |
336 | int j; | 336 | int j; |
337 | struct bio_vec *bv; | 337 | struct bio_vec *bv; |
338 | void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1)); | 338 | void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1)); |
@@ -347,7 +347,7 @@ static void do_btree_write(struct btree *b) | |||
347 | continue_at(cl, btree_write_done, NULL); | 347 | continue_at(cl, btree_write_done, NULL); |
348 | } else { | 348 | } else { |
349 | b->bio->bi_vcnt = 0; | 349 | b->bio->bi_vcnt = 0; |
350 | bio_map(b->bio, i); | 350 | bch_bio_map(b->bio, i); |
351 | 351 | ||
352 | trace_bcache_btree_write(b->bio); | 352 | trace_bcache_btree_write(b->bio); |
353 | bch_submit_bbio(b->bio, b->c, &k.key, 0); | 353 | bch_submit_bbio(b->bio, b->c, &k.key, 0); |
@@ -815,7 +815,7 @@ retry: | |||
815 | void bch_cannibalize_unlock(struct cache_set *c, struct closure *cl) | 815 | void bch_cannibalize_unlock(struct cache_set *c, struct closure *cl) |
816 | { | 816 | { |
817 | if (c->try_harder == cl) { | 817 | if (c->try_harder == cl) { |
818 | time_stats_update(&c->try_harder_time, c->try_harder_start); | 818 | bch_time_stats_update(&c->try_harder_time, c->try_harder_start); |
819 | c->try_harder = NULL; | 819 | c->try_harder = NULL; |
820 | __closure_wake_up(&c->try_wait); | 820 | __closure_wake_up(&c->try_wait); |
821 | } | 821 | } |
@@ -1536,7 +1536,7 @@ static void bch_btree_gc(struct closure *cl) | |||
1536 | 1536 | ||
1537 | available = bch_btree_gc_finish(c); | 1537 | available = bch_btree_gc_finish(c); |
1538 | 1538 | ||
1539 | time_stats_update(&c->btree_gc_time, start_time); | 1539 | bch_time_stats_update(&c->btree_gc_time, start_time); |
1540 | 1540 | ||
1541 | stats.key_bytes *= sizeof(uint64_t); | 1541 | stats.key_bytes *= sizeof(uint64_t); |
1542 | stats.dirty <<= 9; | 1542 | stats.dirty <<= 9; |
@@ -2007,7 +2007,7 @@ static int btree_split(struct btree *b, struct btree_op *op) | |||
2007 | rw_unlock(true, n1); | 2007 | rw_unlock(true, n1); |
2008 | btree_node_free(b, op); | 2008 | btree_node_free(b, op); |
2009 | 2009 | ||
2010 | time_stats_update(&b->c->btree_split_time, start_time); | 2010 | bch_time_stats_update(&b->c->btree_split_time, start_time); |
2011 | 2011 | ||
2012 | return 0; | 2012 | return 0; |
2013 | err_free2: | 2013 | err_free2: |