author    Kent Overstreet <kmo@daterainc.com>    2013-12-20 20:28:16 -0500
committer Kent Overstreet <kmo@daterainc.com>    2014-01-08 16:05:13 -0500
commit    a85e968e66a175c86d0410719ea84a5bd0f1d070 (patch)
tree      83bd657e47b22862380db37af3051d81c1f4e74b /drivers/md/bcache/bset.c
parent    65d45231b56efb3db51eb441e2c68f8252ecdd12 (diff)

bcache: Add struct btree_keys

Soon, bset.c won't need to depend on struct btree.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Diffstat (limited to 'drivers/md/bcache/bset.c')
-rw-r--r--  drivers/md/bcache/bset.c | 179
1 file changed, 108 insertions(+), 71 deletions(-)
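
For orientation before the diff: the patch moves the bset machinery off struct btree and onto a new struct btree_keys, which struct btree now embeds as b->keys. The sketch below is reconstructed purely from the field accesses visible in this diff; the authoritative definition lands in drivers/md/bcache/bset.h, which this page does not show, so field order, widths, and anything the diff never touches are assumptions.

/*
 * Hedged sketch, not the real definition: every field below is inferred
 * from an access somewhere in this patch (b->ops, b->nsets, b->set[i],
 * b->page_order, b->last_set_unwritten, b->expensive_debug_checks).
 */
struct btree_keys {
	const struct btree_keys_ops	*ops;
	uint8_t			page_order;	/* keys span PAGE_SIZE << page_order bytes */
	uint8_t			nsets;		/* index of the last valid set[] entry */
	unsigned		last_set_unwritten:1;	/* set[nsets] still accepts inserts */
	bool			*expensive_debug_checks;

	struct bset_tree	set[MAX_BSETS];	/* was b->sets on struct btree */
};

Callers that still take a struct btree reach the new fields through the embedded member, e.g. b->keys.nsets and &b->keys, which is exactly the churn most of the hunks below consist of.
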
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index c2c42cbbe885..f34ef56560ed 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -149,33 +149,33 @@ struct bkey_float {
 #define BSET_CACHELINE 128
 
 /* Space required for the btree node keys */
-static inline size_t btree_keys_bytes(struct btree *b)
+static inline size_t btree_keys_bytes(struct btree_keys *b)
 {
 	return PAGE_SIZE << b->page_order;
 }
 
-static inline size_t btree_keys_cachelines(struct btree *b)
+static inline size_t btree_keys_cachelines(struct btree_keys *b)
 {
 	return btree_keys_bytes(b) / BSET_CACHELINE;
 }
 
 /* Space required for the auxiliary search trees */
-static inline size_t bset_tree_bytes(struct btree *b)
+static inline size_t bset_tree_bytes(struct btree_keys *b)
 {
 	return btree_keys_cachelines(b) * sizeof(struct bkey_float);
 }
 
 /* Space required for the prev pointers */
-static inline size_t bset_prev_bytes(struct btree *b)
+static inline size_t bset_prev_bytes(struct btree_keys *b)
 {
 	return btree_keys_cachelines(b) * sizeof(uint8_t);
 }
 
 /* Memory allocation */
 
-void bch_btree_keys_free(struct btree *b)
+void bch_btree_keys_free(struct btree_keys *b)
 {
-	struct bset_tree *t = b->sets;
+	struct bset_tree *t = b->set;
 
 	if (bset_prev_bytes(b) < PAGE_SIZE)
 		kfree(t->prev);
@@ -195,10 +195,11 @@ void bch_btree_keys_free(struct btree *b)
 	t->tree = NULL;
 	t->data = NULL;
 }
+EXPORT_SYMBOL(bch_btree_keys_free);
 
-int bch_btree_keys_alloc(struct btree *b, unsigned page_order, gfp_t gfp)
+int bch_btree_keys_alloc(struct btree_keys *b, unsigned page_order, gfp_t gfp)
 {
-	struct bset_tree *t = b->sets;
+	struct bset_tree *t = b->set;
 
 	BUG_ON(t->data);
 
@@ -225,6 +226,29 @@ err:
 	bch_btree_keys_free(b);
 	return -ENOMEM;
 }
+EXPORT_SYMBOL(bch_btree_keys_alloc);
+
+void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
+			 bool *expensive_debug_checks)
+{
+	unsigned i;
+
+	b->ops = ops;
+	b->expensive_debug_checks = expensive_debug_checks;
+	b->nsets = 0;
+	b->last_set_unwritten = 0;
+
+	/* XXX: shouldn't be needed */
+	for (i = 0; i < MAX_BSETS; i++)
+		b->set[i].size = 0;
+	/*
+	 * Second loop starts at 1 because b->keys[0]->data is the memory we
+	 * allocated
+	 */
+	for (i = 1; i < MAX_BSETS; i++)
+		b->set[i].data = NULL;
+}
+EXPORT_SYMBOL(bch_btree_keys_init);
 
 /* Binary tree stuff for auxiliary search trees */
 
@@ -448,9 +472,9 @@ static void make_bfloat(struct bset_tree *t, unsigned j)
 		f->exponent = 127;
 }
 
-static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
+static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
 {
-	if (t != b->sets) {
+	if (t != b->set) {
 		unsigned j = roundup(t[-1].size,
 				     64 / sizeof(struct bkey_float));
 
@@ -458,27 +482,30 @@ static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
 		t->prev = t[-1].prev + j;
 	}
 
-	while (t < b->sets + MAX_BSETS)
+	while (t < b->set + MAX_BSETS)
 		t++->size = 0;
 }
 
-static void bch_bset_build_unwritten_tree(struct btree *b)
+static void bch_bset_build_unwritten_tree(struct btree_keys *b)
 {
 	struct bset_tree *t = bset_tree_last(b);
 
+	BUG_ON(b->last_set_unwritten);
+	b->last_set_unwritten = 1;
+
 	bset_alloc_tree(b, t);
 
-	if (t->tree != b->sets->tree + btree_keys_cachelines(b)) {
+	if (t->tree != b->set->tree + btree_keys_cachelines(b)) {
 		t->prev[0] = bkey_to_cacheline_offset(t->data->start);
 		t->size = 1;
 	}
 }
 
-void bch_bset_init_next(struct btree *b, struct bset *i, uint64_t magic)
+void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
 {
-	if (i != b->sets->data) {
-		b->sets[++b->nsets].data = i;
-		i->seq = b->sets->data->seq;
+	if (i != b->set->data) {
+		b->set[++b->nsets].data = i;
+		i->seq = b->set->data->seq;
 	} else
 		get_random_bytes(&i->seq, sizeof(uint64_t));
 
@@ -488,18 +515,21 @@ void bch_bset_init_next(struct btree *b, struct bset *i, uint64_t magic)
 
 	bch_bset_build_unwritten_tree(b);
 }
+EXPORT_SYMBOL(bch_bset_init_next);
 
-static void bset_build_written_tree(struct btree *b)
+void bch_bset_build_written_tree(struct btree_keys *b)
 {
 	struct bset_tree *t = bset_tree_last(b);
 	struct bkey *k = t->data->start;
 	unsigned j, cacheline = 1;
 
+	b->last_set_unwritten = 0;
+
 	bset_alloc_tree(b, t);
 
 	t->size = min_t(unsigned,
 			bkey_to_cacheline(t, bset_bkey_last(t->data)),
-			b->sets->tree + btree_keys_cachelines(b) - t->tree);
+			b->set->tree + btree_keys_cachelines(b) - t->tree);
 
 	if (t->size < 2) {
 		t->size = 0;
@@ -532,13 +562,14 @@ static void bset_build_written_tree(struct btree *b)
 	     j = inorder_next(j, t->size))
 		make_bfloat(t, j);
 }
+EXPORT_SYMBOL(bch_bset_build_written_tree);
 
-void bch_bset_fix_invalidated_key(struct btree *b, struct bkey *k)
+void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
 {
 	struct bset_tree *t;
 	unsigned inorder, j = 1;
 
-	for (t = b->sets; t <= bset_tree_last(b); t++)
+	for (t = b->set; t <= bset_tree_last(b); t++)
 		if (k < bset_bkey_last(t->data))
 			goto found_set;
 
@@ -577,8 +608,9 @@ fix_right: do {
 			j = j * 2 + 1;
 		} while (j < t->size);
 }
+EXPORT_SYMBOL(bch_bset_fix_invalidated_key);
 
-static void bch_bset_fix_lookup_table(struct btree *b,
+static void bch_bset_fix_lookup_table(struct btree_keys *b,
 				      struct bset_tree *t,
 				      struct bkey *k)
 {
@@ -613,7 +645,7 @@ static void bch_bset_fix_lookup_table(struct btree *b,
 		}
 	}
 
-	if (t->size == b->sets->tree + btree_keys_cachelines(b) - t->tree)
+	if (t->size == b->set->tree + btree_keys_cachelines(b) - t->tree)
 		return;
 
 	/* Possibly add a new entry to the end of the lookup table */
@@ -627,12 +659,12 @@ static void bch_bset_fix_lookup_table(struct btree *b,
 	}
 }
 
-void bch_bset_insert(struct btree *b, struct bkey *where,
+void bch_bset_insert(struct btree_keys *b, struct bkey *where,
 		     struct bkey *insert)
 {
 	struct bset_tree *t = bset_tree_last(b);
 
-	BUG_ON(t->data != write_block(b));
+	BUG_ON(!b->last_set_unwritten);
 	BUG_ON(bset_byte_offset(b, t->data) +
 	       __set_bytes(t->data, t->data->keys + bkey_u64s(insert)) >
 	       PAGE_SIZE << b->page_order);
@@ -645,20 +677,17 @@ void bch_bset_insert(struct btree *b, struct bkey *where,
 	bkey_copy(where, insert);
 	bch_bset_fix_lookup_table(b, t, where);
 }
+EXPORT_SYMBOL(bch_bset_insert);
 
 struct bset_search_iter {
 	struct bkey *l, *r;
 };
 
-static struct bset_search_iter bset_search_write_set(struct btree *b,
-						     struct bset_tree *t,
+static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
 						     const struct bkey *search)
 {
 	unsigned li = 0, ri = t->size;
 
-	BUG_ON(!b->nsets &&
-	       t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));
-
 	while (li + 1 != ri) {
 		unsigned m = (li + ri) >> 1;
 
@@ -674,8 +703,7 @@ static struct bset_search_iter bset_search_write_set(struct btree *b,
 	};
 }
 
-static struct bset_search_iter bset_search_tree(struct btree *b,
-						struct bset_tree *t,
+static struct bset_search_iter bset_search_tree(struct bset_tree *t,
 						const struct bkey *search)
 {
 	struct bkey *l, *r;
@@ -759,7 +787,7 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
 	if (unlikely(!t->size)) {
 		i.l = t->data->start;
 		i.r = bset_bkey_last(t->data);
-	} else if (bset_written(b, t)) {
+	} else if (bset_written(&b->keys, t)) {
 		/*
 		 * Each node in the auxiliary search tree covers a certain range
 		 * of bits, and keys above and below the set it covers might
@@ -773,12 +801,16 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
 		if (unlikely(bkey_cmp(search, t->data->start) < 0))
 			return t->data->start;
 
-		i = bset_search_tree(b, t, search);
-	} else
-		i = bset_search_write_set(b, t, search);
+		i = bset_search_tree(t, search);
+	} else {
+		BUG_ON(!b->keys.nsets &&
+		       t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));
+
+		i = bset_search_write_set(t, search);
+	}
 
 	if (expensive_debug_checks(b->c)) {
-		BUG_ON(bset_written(b, t) &&
+		BUG_ON(bset_written(&b->keys, t) &&
 		       i.l != t->data->start &&
 		       bkey_cmp(tree_to_prev_bkey(t,
 			  inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
@@ -794,6 +826,7 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
 
 	return i.l;
 }
+EXPORT_SYMBOL(__bch_bset_search);
 
 /* Btree iterator */
 
@@ -833,7 +866,7 @@ static struct bkey *__bch_btree_iter_init(struct btree *b,
 	iter->b = b;
 #endif
 
-	for (; start <= &b->sets[b->nsets]; start++) {
+	for (; start <= bset_tree_last(&b->keys); start++) {
 		ret = bch_bset_search(b, start, search);
 		bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
 	}
@@ -845,8 +878,9 @@ struct bkey *bch_btree_iter_init(struct btree *b,
 				 struct btree_iter *iter,
 				 struct bkey *search)
 {
-	return __bch_btree_iter_init(b, iter, search, b->sets);
+	return __bch_btree_iter_init(b, iter, search, b->keys.set);
 }
+EXPORT_SYMBOL(bch_btree_iter_init);
 
 static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
 						 btree_iter_cmp_fn *cmp)
@@ -879,9 +913,10 @@ struct bkey *bch_btree_iter_next(struct btree_iter *iter)
 	return __bch_btree_iter_next(iter, btree_iter_cmp);
 
 }
+EXPORT_SYMBOL(bch_btree_iter_next);
 
 struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
-					struct btree *b, ptr_filter_fn fn)
+					struct btree_keys *b, ptr_filter_fn fn)
 {
 	struct bkey *ret;
 
@@ -913,15 +948,16 @@ int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
 
 	return 0;
 }
+EXPORT_SYMBOL(bch_bset_sort_state_init);
 
-static void btree_mergesort(struct btree *b, struct bset *out,
+static void btree_mergesort(struct btree_keys *b, struct bset *out,
 			    struct btree_iter *iter,
 			    bool fixup, bool remove_stale)
 {
 	int i;
 	struct bkey *k, *last = NULL;
 	BKEY_PADDED(k) tmp;
-	bool (*bad)(struct btree *, const struct bkey *) = remove_stale
+	bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
 		? bch_ptr_bad
 		: bch_ptr_invalid;
 
@@ -955,7 +991,7 @@ static void btree_mergesort(struct btree *b, struct bset *out,
 	pr_debug("sorted %i keys", out->keys);
 }
 
-static void __btree_sort(struct btree *b, struct btree_iter *iter,
+static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
 			 unsigned start, unsigned order, bool fixup,
 			 struct bset_sort_state *state)
 {
@@ -968,7 +1004,7 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,
 
 		out = page_address(mempool_alloc(state->pool, GFP_NOIO));
 		used_mempool = true;
-		order = ilog2(bucket_pages(b->c));
+		order = state->page_order;
 	}
 
 	start_time = local_clock();
@@ -983,13 +1019,13 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,
 		 * memcpy()
 		 */
 
-		out->magic = bset_magic(&b->c->sb);
-		out->seq = b->sets[0].data->seq;
-		out->version = b->sets[0].data->version;
-		swap(out, b->sets[0].data);
+		out->magic = b->set->data->magic;
+		out->seq = b->set->data->seq;
+		out->version = b->set->data->version;
+		swap(out, b->set->data);
 	} else {
-		b->sets[start].data->keys = out->keys;
-		memcpy(b->sets[start].data->start, out->start,
+		b->set[start].data->keys = out->keys;
+		memcpy(b->set[start].data->start, out->start,
 		       (void *) bset_bkey_last(out) - (void *) out->start);
 	}
 
@@ -998,7 +1034,7 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,
 	else
 		free_pages((unsigned long) out, order);
 
-	bset_build_written_tree(b);
+	bch_bset_build_written_tree(b);
 
 	if (!start)
 		bch_time_stats_update(&state->time, start_time);
@@ -1007,34 +1043,32 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,
 void bch_btree_sort_partial(struct btree *b, unsigned start,
 			    struct bset_sort_state *state)
 {
-	size_t order = b->page_order, keys = 0;
+	size_t order = b->keys.page_order, keys = 0;
 	struct btree_iter iter;
 	int oldsize = bch_count_data(b);
 
-	__bch_btree_iter_init(b, &iter, NULL, &b->sets[start]);
-
-	BUG_ON(!bset_written(b, bset_tree_last(b)) &&
-	       (bset_tree_last(b)->size || b->nsets));
+	__bch_btree_iter_init(b, &iter, NULL, &b->keys.set[start]);
 
 	if (start) {
 		unsigned i;
 
-		for (i = start; i <= b->nsets; i++)
-			keys += b->sets[i].data->keys;
+		for (i = start; i <= b->keys.nsets; i++)
+			keys += b->keys.set[i].data->keys;
 
-		order = roundup_pow_of_two(__set_bytes(b->sets->data,
+		order = roundup_pow_of_two(__set_bytes(b->keys.set->data,
 						       keys)) / PAGE_SIZE;
 		if (order)
 			order = ilog2(order);
 	}
 
-	__btree_sort(b, &iter, start, order, false, state);
+	__btree_sort(&b->keys, &iter, start, order, false, state);
 
 	EBUG_ON(b->written && oldsize >= 0 && bch_count_data(b) != oldsize);
 }
 EXPORT_SYMBOL(bch_btree_sort_partial);
 
-void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter,
+void bch_btree_sort_and_fix_extents(struct btree_keys *b,
+				    struct btree_iter *iter,
 				    struct bset_sort_state *state)
 {
 	__btree_sort(b, iter, 0, b->page_order, true, state);
@@ -1048,11 +1082,11 @@ void bch_btree_sort_into(struct btree *b, struct btree *new,
 	struct btree_iter iter;
 	bch_btree_iter_init(b, &iter, NULL);
 
-	btree_mergesort(b, new->sets->data, &iter, false, true);
+	btree_mergesort(&b->keys, new->keys.set->data, &iter, false, true);
 
 	bch_time_stats_update(&state->time, start_time);
 
-	new->sets->size = 0;
+	new->keys.set->size = 0; // XXX: why?
 }
 
 #define SORT_CRIT	(4096 / sizeof(uint64_t))
@@ -1062,28 +1096,31 @@ void bch_btree_sort_lazy(struct btree *b, struct bset_sort_state *state)
 	unsigned crit = SORT_CRIT;
 	int i;
 
+	b->keys.last_set_unwritten = 0;
+
 	/* Don't sort if nothing to do */
-	if (!b->nsets)
+	if (!b->keys.nsets)
 		goto out;
 
-	for (i = b->nsets - 1; i >= 0; --i) {
+	for (i = b->keys.nsets - 1; i >= 0; --i) {
 		crit *= state->crit_factor;
 
-		if (b->sets[i].data->keys < crit) {
+		if (b->keys.set[i].data->keys < crit) {
 			bch_btree_sort_partial(b, i, state);
 			return;
 		}
 	}
 
 	/* Sort if we'd overflow */
-	if (b->nsets + 1 == MAX_BSETS) {
+	if (b->keys.nsets + 1 == MAX_BSETS) {
 		bch_btree_sort(b, state);
 		return;
 	}
 
 out:
-	bset_build_written_tree(b);
+	bch_bset_build_written_tree(&b->keys);
 }
+EXPORT_SYMBOL(bch_btree_sort_lazy);
 
 /* Sysfs stuff */
 
@@ -1102,12 +1139,12 @@ static int btree_bset_stats(struct btree_op *op, struct btree *b)
 
 	stats->nodes++;
 
-	for (i = 0; i <= b->nsets; i++) {
-		struct bset_tree *t = &b->sets[i];
+	for (i = 0; i <= b->keys.nsets; i++) {
+		struct bset_tree *t = &b->keys.set[i];
 		size_t bytes = t->data->keys * sizeof(uint64_t);
 		size_t j;
 
-		if (bset_written(b, t)) {
+		if (bset_written(&b->keys, t)) {
 			stats->sets_written++;
 			stats->bytes_written += bytes;
 
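
Taken together, the symbols this patch exports (bch_btree_keys_init/alloc/free, bch_bset_init_next, bch_bset_insert, bch_bset_build_written_tree, ...) form a self-contained lifecycle for a keys container. Below is a minimal sketch of a hypothetical caller, assuming bch_btree_keys_alloc() records page_order as btree_keys_bytes() implies; example_setup, my_ops, debug_flag, and magic are made-up placeholders, and locking plus error paths are elided.

/* Hypothetical caller, for illustration only. */
static int example_setup(struct btree_keys *b,
			 const struct btree_keys_ops *my_ops,
			 bool *debug_flag, uint64_t magic)
{
	bch_btree_keys_init(b, my_ops, debug_flag);

	/* Allocates set->data plus the bkey_float and prev arrays. */
	if (bch_btree_keys_alloc(b, 0 /* page order: one page */, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Open an unwritten bset: sets last_set_unwritten, which
	 * bch_bset_insert() now asserts instead of comparing against
	 * write_block(b).
	 */
	bch_bset_init_next(b, b->set->data, magic);

	/* ... append keys with bch_bset_insert(b, where, insert) ... */

	/*
	 * Once the bset has been written out, build the read-optimized
	 * auxiliary search tree (clears last_set_unwritten again).
	 */
	bch_bset_build_written_tree(b);

	bch_btree_keys_free(b);
	return 0;
}

Note that the iterator entry points (bch_btree_iter_init() and friends) still take a struct btree at this point in the series; the commit message's "soon" covers the follow-up patches that finish the split.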