author		Kent Overstreet <kmo@daterainc.com>	2013-12-18 00:56:21 -0500
committer	Kent Overstreet <kmo@daterainc.com>	2014-01-08 16:05:12 -0500
commit		fafff81cead78157099df1ee10af16cc51893ddc (patch)
tree		a198145a2ac94431667e82e2eb09c1365fb94b84 /drivers/md
parent		085d2a3dd4d65b7bce1dead987c647dbbc014281 (diff)
bcache: Bkey indexing renaming
More refactoring:

   node() -> bset_bkey_idx()
   end() -> bset_bkey_last()

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
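For orientation: node() and end() were generic-sounding macros in bcache.h that index into a bset's flat uint64_t key array; their replacements live in bset.h and name the structure they operate on. The typical call-site change, using the loop shape that appears at several sites in this patch (body elided):

	struct bkey *k;

	/* before: end(i) gives no hint that i is a bset */
	for (k = i->start; k < end(i); k = bkey_next(k))
		/* ... */;

	/* after: the helper names what is being indexed */
	for (k = i->start; k < bset_bkey_last(i); k = bkey_next(k))
		/* ... */;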
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/bcache/bcache.h	11
-rw-r--r--	drivers/md/bcache/bset.c	28
-rw-r--r--	drivers/md/bcache/bset.h	30
-rw-r--r--	drivers/md/bcache/btree.c	33
-rw-r--r--	drivers/md/bcache/debug.c	6
-rw-r--r--	drivers/md/bcache/journal.c	6
6 files changed, 62 insertions, 52 deletions
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 3fd87323368c..2b46c86ac440 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -724,9 +724,6 @@ struct bbio {
 #define __set_blocks(i, k, c)	DIV_ROUND_UP(__set_bytes(i, k), block_bytes(c))
 #define set_blocks(i, c)	__set_blocks(i, (i)->keys, c)
 
-#define node(i, j)		((struct bkey *) ((i)->d + (j)))
-#define end(i)			node(i, (i)->keys)
-
 #define btree_data_space(b)	(PAGE_SIZE << (b)->page_order)
 
 #define prios_per_bucket(c)				\
@@ -791,18 +788,14 @@ static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
 
 /* Btree key macros */
 
-static inline void bkey_init(struct bkey *k)
-{
-	*k = ZERO_KEY;
-}
-
 /*
  * This is used for various on disk data structures - cache_sb, prio_set, bset,
  * jset: The checksum is _always_ the first 8 bytes of these structs
  */
 #define csum_set(i)							\
 	bch_crc64(((void *) (i)) + sizeof(uint64_t),			\
-		  ((void *) end(i)) - (((void *) (i)) + sizeof(uint64_t)))
+		  ((void *) bset_bkey_last(i)) -			\
+		  (((void *) (i)) + sizeof(uint64_t)))
 
 /* Error handling macros */
 
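The csum_set() hunk above is mechanical, but the comment it sits under states an on-disk convention worth spelling out: the checksum occupies the first 8 bytes of cache_sb, prio_set, bset and jset, so the CRC covers byte 8 through the end of the last key. A minimal standalone sketch of that shape, using made-up stand-in structs and a stub digest rather than the kernel's bch_crc64():

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* stand-in for the on-disk layout: checksum first, then everything else */
struct fake_set {
	uint64_t csum;	/* always the first 8 bytes; excluded from the CRC */
	uint64_t keys;	/* number of key u64s in d[] */
	uint64_t d[8];
};

/* stub digest standing in for bch_crc64() (FNV-1a, illustration only) */
static uint64_t stub_crc64(const void *p, size_t len)
{
	const unsigned char *b = p;
	uint64_t h = 0xcbf29ce484222325ULL;

	while (len--)
		h = (h ^ *b++) * 0x100000001b3ULL;
	return h;
}

/* mirrors csum_set(): skip the leading 8 bytes, hash through the last key */
static uint64_t csum_set_sketch(const struct fake_set *i)
{
	const char *data = (const char *) i + sizeof(uint64_t);
	const char *last = (const char *) &i->d[i->keys]; /* bset_bkey_last() analogue */

	return stub_crc64(data, (size_t) (last - data));
}

int main(void)
{
	struct fake_set s = { .keys = 3, .d = { 1, 2, 3 } };

	s.csum = csum_set_sketch(&s);	/* storing csum doesn't change the CRC input */
	printf("csum = %016llx\n", (unsigned long long) s.csum);
	return 0;
}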
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index f91347a55c41..bfee926e35f0 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -500,7 +500,7 @@ static void make_bfloat(struct bset_tree *t, unsigned j)
 		: tree_to_prev_bkey(t, j >> ffs(j));
 
 	struct bkey *r = is_power_of_2(j + 1)
-		? node(t->data, t->data->keys - bkey_u64s(&t->end))
+		? bset_bkey_idx(t->data, t->data->keys - bkey_u64s(&t->end))
 		: tree_to_bkey(t, j >> (ffz(j) + 1));
 
 	BUG_ON(m < l || m > r);
@@ -559,7 +559,7 @@ static void bset_build_written_tree(struct btree *b)
 	bset_alloc_tree(b, t);
 
 	t->size = min_t(unsigned,
-			bkey_to_cacheline(t, end(t->data)),
+			bkey_to_cacheline(t, bset_bkey_last(t->data)),
 			b->sets->tree + bset_tree_space(b) - t->tree);
 
 	if (t->size < 2) {
@@ -582,7 +582,7 @@ static void bset_build_written_tree(struct btree *b)
 		t->tree[j].m = bkey_to_cacheline_offset(k);
 	}
 
-	while (bkey_next(k) != end(t->data))
+	while (bkey_next(k) != bset_bkey_last(t->data))
 		k = bkey_next(k);
 
 	t->end = *k;
@@ -600,7 +600,7 @@ void bch_bset_fix_invalidated_key(struct btree *b, struct bkey *k)
 	unsigned inorder, j = 1;
 
 	for (t = b->sets; t <= &b->sets[b->nsets]; t++)
-		if (k < end(t->data))
+		if (k < bset_bkey_last(t->data))
 			goto found_set;
 
 	BUG();
@@ -613,7 +613,7 @@ found_set:
 	if (k == t->data->start)
 		goto fix_left;
 
-	if (bkey_next(k) == end(t->data)) {
+	if (bkey_next(k) == bset_bkey_last(t->data)) {
 		t->end = *k;
 		goto fix_right;
 	}
@@ -679,7 +679,7 @@ void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)
 	/* Possibly add a new entry to the end of the lookup table */
 
 	for (k = table_to_bkey(t, t->size - 1);
-	     k != end(t->data);
+	     k != bset_bkey_last(t->data);
 	     k = bkey_next(k))
 		if (t->size == bkey_to_cacheline(t, k)) {
 			t->prev[t->size] = bkey_to_cacheline_offset(k);
@@ -715,7 +715,7 @@ static struct bset_search_iter bset_search_write_set(struct btree *b,
 	unsigned li = 0, ri = t->size;
 
 	BUG_ON(!b->nsets &&
-	       t->size < bkey_to_cacheline(t, end(t->data)));
+	       t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));
 
 	while (li + 1 != ri) {
 		unsigned m = (li + ri) >> 1;
@@ -728,7 +728,7 @@ static struct bset_search_iter bset_search_write_set(struct btree *b,
 
 	return (struct bset_search_iter) {
 		table_to_bkey(t, li),
-		ri < t->size ? table_to_bkey(t, ri) : end(t->data)
+		ri < t->size ? table_to_bkey(t, ri) : bset_bkey_last(t->data)
 	};
 }
 
@@ -780,7 +780,7 @@ static struct bset_search_iter bset_search_tree(struct btree *b,
 			f = &t->tree[inorder_next(j, t->size)];
 			r = cacheline_to_bkey(t, inorder, f->m);
 		} else
-			r = end(t->data);
+			r = bset_bkey_last(t->data);
 	} else {
 		r = cacheline_to_bkey(t, inorder, f->m);
 
@@ -816,7 +816,7 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
 
 	if (unlikely(!t->size)) {
 		i.l = t->data->start;
-		i.r = end(t->data);
+		i.r = bset_bkey_last(t->data);
 	} else if (bset_written(b, t)) {
 		/*
 		 * Each node in the auxiliary search tree covers a certain range
@@ -826,7 +826,7 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
 		 */
 
 		if (unlikely(bkey_cmp(search, &t->end) >= 0))
-			return end(t->data);
+			return bset_bkey_last(t->data);
 
 		if (unlikely(bkey_cmp(search, t->data->start) < 0))
 			return t->data->start;
@@ -842,7 +842,7 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
 				inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
 			search) > 0);
 
-		BUG_ON(i.r != end(t->data) &&
+		BUG_ON(i.r != bset_bkey_last(t->data) &&
 		       bkey_cmp(i.r, search) <= 0);
 	}
 
@@ -897,7 +897,7 @@ struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
 
 	for (; start <= &b->sets[b->nsets]; start++) {
 		ret = bch_bset_search(b, start, search);
-		bch_btree_iter_push(iter, ret, end(start->data));
+		bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
 	}
 
 	return ret;
@@ -1067,7 +1067,7 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,
 	} else {
 		b->sets[start].data->keys = out->keys;
 		memcpy(b->sets[start].data->start, out->start,
-		       (void *) end(out) - (void *) out->start);
+		       (void *) bset_bkey_last(out) - (void *) out->start);
 	}
 
 	if (used_mempool)
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index 303d31a3b9e6..88b6edbf508b 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -190,14 +190,6 @@ struct bset_tree {
 	struct bset *data;
 };
 
-static __always_inline int64_t bkey_cmp(const struct bkey *l,
-					const struct bkey *r)
-{
-	return unlikely(KEY_INODE(l) != KEY_INODE(r))
-		? (int64_t) KEY_INODE(l) - (int64_t) KEY_INODE(r)
-		: (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
-}
-
 /* Keylists */
 
 struct keylist {
@@ -261,6 +253,28 @@ struct bkey *bch_keylist_pop(struct keylist *);
 void bch_keylist_pop_front(struct keylist *);
 int __bch_keylist_realloc(struct keylist *, unsigned);
 
+/* Bkey utility code */
+
+#define bset_bkey_last(i)	bkey_idx((struct bkey *) (i)->d, (i)->keys)
+
+static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned idx)
+{
+	return bkey_idx(i->start, idx);
+}
+
+static inline void bkey_init(struct bkey *k)
+{
+	*k = ZERO_KEY;
+}
+
+static __always_inline int64_t bkey_cmp(const struct bkey *l,
+					const struct bkey *r)
+{
+	return unlikely(KEY_INODE(l) != KEY_INODE(r))
+		? (int64_t) KEY_INODE(l) - (int64_t) KEY_INODE(r)
+		: (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
+}
+
 void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
 			      unsigned);
 bool __bch_cut_front(const struct bkey *, struct bkey *);
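One subtlety the new names carry over from node(): the idx argument of bset_bkey_idx() is an offset in u64s into the flat key array, not a key count, since bkeys are variable-length. That is also why bset_bkey_last() can be computed directly from (i)->keys, the total number of key u64s in the set. A standalone toy sketch of this addressing scheme (simplified one-word-header keys; the real struct bkey and bkey_u64s() encode more):

#include <stdint.h>
#include <stdio.h>

/* toy variable-length key: low byte of hdr = number of payload u64s */
struct toy_key {
	uint64_t hdr;
	uint64_t payload[];
};

/* total size of the key in u64s, header included (bkey_u64s() analogue) */
static unsigned toy_key_u64s(const struct toy_key *k)
{
	return 1 + (unsigned) (k->hdr & 0xff);
}

/* bkey_idx() analogue: idx counts u64s from base, not keys */
static struct toy_key *toy_key_idx(void *base, unsigned idx)
{
	return (struct toy_key *) ((uint64_t *) base + idx);
}

int main(void)
{
	/* two keys packed back to back: 2 u64s (hdr 1), then 3 u64s (hdr 2) */
	uint64_t d[5] = { 1, 100, 2, 200, 201 };
	unsigned keys = 5;	/* like (i)->keys: total u64s, not a key count */

	struct toy_key *k = toy_key_idx(d, 0);
	struct toy_key *last = toy_key_idx(d, keys);	/* bset_bkey_last() analogue */

	while (k < last) {	/* bkey_next()-style walk */
		printf("key spanning %u u64s\n", toy_key_u64s(k));
		k = toy_key_idx(k, toy_key_u64s(k));
	}
	return 0;
}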
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index f0a6399fdd3c..8aaaf16637a0 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -197,7 +197,7 @@ void bkey_put(struct cache_set *c, struct bkey *k)
 static uint64_t btree_csum_set(struct btree *b, struct bset *i)
 {
 	uint64_t crc = b->key.ptr[0];
-	void *data = (void *) i + 8, *end = end(i);
+	void *data = (void *) i + 8, *end = bset_bkey_last(i);
 
 	crc = bch_crc64_update(crc, data, end - data);
 	return crc ^ 0xffffffffffffffffULL;
@@ -251,7 +251,7 @@ void bch_btree_node_read_done(struct btree *b)
 		if (i != b->sets[0].data && !i->keys)
 			goto err;
 
-		bch_btree_iter_push(iter, i->start, end(i));
+		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
 
 		b->written += set_blocks(i, b->c);
 	}
@@ -1310,7 +1310,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
 
 		if (i > 1) {
 			for (k = n2->start;
-			     k < end(n2);
+			     k < bset_bkey_last(n2);
 			     k = bkey_next(k)) {
 				if (__set_blocks(n1, n1->keys + keys +
 						 bkey_u64s(k), b->c) > blocks)
@@ -1343,16 +1343,17 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
 		if (last)
 			bkey_copy_key(&new_nodes[i]->key, last);
 
-		memcpy(end(n1),
+		memcpy(bset_bkey_last(n1),
 		       n2->start,
-		       (void *) node(n2, keys) - (void *) n2->start);
+		       (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
 
 		n1->keys += keys;
 		r[i].keys = n1->keys;
 
 		memmove(n2->start,
-			node(n2, keys),
-			(void *) end(n2) - (void *) node(n2, keys));
+			bset_bkey_idx(n2, keys),
+			(void *) bset_bkey_last(n2) -
+			(void *) bset_bkey_idx(n2, keys));
 
 		n2->keys -= keys;
 
@@ -1830,7 +1831,7 @@ static void shift_keys(struct btree *b, struct bkey *where, struct bkey *insert)
 
 	memmove((uint64_t *) where + bkey_u64s(insert),
 		where,
-		(void *) end(i) - (void *) where);
+		(void *) bset_bkey_last(i) - (void *) where);
 
 	i->keys += bkey_u64s(insert);
 	bkey_copy(where, insert);
@@ -2014,7 +2015,7 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
 			bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
 						     KEY_START(k), KEY_SIZE(k));
 
-		while (m != end(i) &&
+		while (m != bset_bkey_last(i) &&
 		       bkey_cmp(k, &START_KEY(m)) > 0)
 			prev = m, m = bkey_next(m);
 
@@ -2028,12 +2029,12 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
 			goto merged;
 
 		status = BTREE_INSERT_STATUS_OVERWROTE;
-		if (m != end(i) &&
+		if (m != bset_bkey_last(i) &&
 		    KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
 			goto copy;
 
 		status = BTREE_INSERT_STATUS_FRONT_MERGE;
-		if (m != end(i) &&
+		if (m != bset_bkey_last(i) &&
 		    bch_bkey_try_merge(b, k, m))
 			goto copy;
 	} else {
@@ -2142,16 +2143,18 @@ static int btree_split(struct btree *b, struct btree_op *op,
 		 */
 
 		while (keys < (n1->sets[0].data->keys * 3) / 5)
-			keys += bkey_u64s(node(n1->sets[0].data, keys));
+			keys += bkey_u64s(bset_bkey_idx(n1->sets[0].data,
+							keys));
 
-		bkey_copy_key(&n1->key, node(n1->sets[0].data, keys));
-		keys += bkey_u64s(node(n1->sets[0].data, keys));
+		bkey_copy_key(&n1->key,
+			      bset_bkey_idx(n1->sets[0].data, keys));
+		keys += bkey_u64s(bset_bkey_idx(n1->sets[0].data, keys));
 
 		n2->sets[0].data->keys = n1->sets[0].data->keys - keys;
 		n1->sets[0].data->keys = keys;
 
 		memcpy(n2->sets[0].data->start,
-		       end(n1->sets[0].data),
+		       bset_bkey_last(n1->sets[0].data),
 		       n2->sets[0].data->keys * sizeof(uint64_t));
 
 		bkey_copy_key(&n2->key, &b->key);
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 8887c550d56c..955fa1d31774 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -84,7 +84,7 @@ static void dump_bset(struct btree *b, struct bset *i, unsigned set)
 	unsigned j;
 	char buf[80];
 
-	for (k = i->start; k < end(i); k = next) {
+	for (k = i->start; k < bset_bkey_last(i); k = next) {
 		next = bkey_next(k);
 
 		bch_bkey_to_text(buf, sizeof(buf), k);
@@ -102,7 +102,7 @@ static void dump_bset(struct btree *b, struct bset *i, unsigned set)
 
 		printk(" %s\n", bch_ptr_status(b->c, k));
 
-		if (next < end(i) &&
+		if (next < bset_bkey_last(i) &&
 		    bkey_cmp(k, !b->level ? &START_KEY(next) : next) > 0)
 			printk(KERN_ERR "Key skipped backwards\n");
 	}
@@ -162,7 +162,7 @@ void bch_btree_verify(struct btree *b)
 	if (inmemory->keys != sorted->keys ||
 	    memcmp(inmemory->start,
 		   sorted->start,
-		   (void *) end(inmemory) - (void *) inmemory->start)) {
+		   (void *) bset_bkey_last(inmemory) - (void *) inmemory->start)) {
 		struct bset *i;
 		unsigned j;
 
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 9d32d5790822..5e14e3325ec1 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -284,7 +284,7 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
 		}
 
 		for (k = i->j.start;
-		     k < end(&i->j);
+		     k < bset_bkey_last(&i->j);
 		     k = bkey_next(k)) {
 			unsigned j;
 
@@ -322,7 +322,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
 			 n, i->j.seq - 1, start, end);
 
 		for (k = i->j.start;
-		     k < end(&i->j);
+		     k < bset_bkey_last(&i->j);
 		     k = bkey_next(k)) {
 			trace_bcache_journal_replay_key(k);
 
@@ -751,7 +751,7 @@ atomic_t *bch_journal(struct cache_set *c,
 
 	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));
 
-	memcpy(end(w->data), keys->keys, bch_keylist_bytes(keys));
+	memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
 	w->data->keys += bch_keylist_nkeys(keys);
 
 	ret = &fifo_back(&c->journal.pin);