diff options
author | Kent Overstreet <kmo@daterainc.com> | 2013-12-17 06:11:06 -0500 |
---|---|---|
committer | Kent Overstreet <kmo@daterainc.com> | 2014-01-08 16:05:15 -0500 |
commit | 9dd6358a21daf4fc6a5b2b779267a62f0d1d3181 (patch) | |
tree | 21de0133230a783b70ec0304fece80a5be4a8f22 | |
parent | 3b3e9e50dd951725130645660b526c4f367dcdee (diff) |
bcache: Fix auxiliary search trees for key size > cacheline size
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
-rw-r--r-- | drivers/md/bcache/bset.c | 28 |
1 file changed, 14 insertions, 14 deletions
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index f990403c4f1c..4f6b5940e609 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c | |||
@@ -511,9 +511,11 @@ static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k) | |||
511 | return ((void *) k - (void *) t->data) / BSET_CACHELINE; | 511 | return ((void *) k - (void *) t->data) / BSET_CACHELINE; |
512 | } | 512 | } |
513 | 513 | ||
514 | static unsigned bkey_to_cacheline_offset(struct bkey *k) | 514 | static unsigned bkey_to_cacheline_offset(struct bset_tree *t, |
515 | unsigned cacheline, | ||
516 | struct bkey *k) | ||
515 | { | 517 | { |
516 | return ((size_t) k & (BSET_CACHELINE - 1)) / sizeof(uint64_t); | 518 | return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0); |
517 | } | 519 | } |
518 | 520 | ||
519 | static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j) | 521 | static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j) |
@@ -608,7 +610,7 @@ static void bch_bset_build_unwritten_tree(struct btree_keys *b) | |||
608 | bset_alloc_tree(b, t); | 610 | bset_alloc_tree(b, t); |
609 | 611 | ||
610 | if (t->tree != b->set->tree + btree_keys_cachelines(b)) { | 612 | if (t->tree != b->set->tree + btree_keys_cachelines(b)) { |
611 | t->prev[0] = bkey_to_cacheline_offset(t->data->start); | 613 | t->prev[0] = bkey_to_cacheline_offset(t, 0, t->data->start); |
612 | t->size = 1; | 614 | t->size = 1; |
613 | } | 615 | } |
614 | } | 616 | } |
@@ -632,7 +634,7 @@ EXPORT_SYMBOL(bch_bset_init_next); | |||
632 | void bch_bset_build_written_tree(struct btree_keys *b) | 634 | void bch_bset_build_written_tree(struct btree_keys *b) |
633 | { | 635 | { |
634 | struct bset_tree *t = bset_tree_last(b); | 636 | struct bset_tree *t = bset_tree_last(b); |
635 | struct bkey *k = t->data->start; | 637 | struct bkey *prev = NULL, *k = t->data->start; |
636 | unsigned j, cacheline = 1; | 638 | unsigned j, cacheline = 1; |
637 | 639 | ||
638 | b->last_set_unwritten = 0; | 640 | b->last_set_unwritten = 0; |
@@ -654,13 +656,11 @@ void bch_bset_build_written_tree(struct btree_keys *b) | |||
654 | for (j = inorder_next(0, t->size); | 656 | for (j = inorder_next(0, t->size); |
655 | j; | 657 | j; |
656 | j = inorder_next(j, t->size)) { | 658 | j = inorder_next(j, t->size)) { |
657 | while (bkey_to_cacheline(t, k) != cacheline) | 659 | while (bkey_to_cacheline(t, k) < cacheline) |
658 | k = bkey_next(k); | 660 | prev = k, k = bkey_next(k); |
659 | 661 | ||
660 | t->prev[j] = bkey_u64s(k); | 662 | t->prev[j] = bkey_u64s(prev); |
661 | k = bkey_next(k); | 663 | t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k); |
662 | cacheline++; | ||
663 | t->tree[j].m = bkey_to_cacheline_offset(k); | ||
664 | } | 664 | } |
665 | 665 | ||
666 | while (bkey_next(k) != bset_bkey_last(t->data)) | 666 | while (bkey_next(k) != bset_bkey_last(t->data)) |
@@ -739,8 +739,8 @@ static void bch_bset_fix_lookup_table(struct btree_keys *b, | |||
739 | * lookup table for the first key that is strictly greater than k: | 739 | * lookup table for the first key that is strictly greater than k: |
740 | * it's either k's cacheline or the next one | 740 | * it's either k's cacheline or the next one |
741 | */ | 741 | */ |
742 | if (j < t->size && | 742 | while (j < t->size && |
743 | table_to_bkey(t, j) <= k) | 743 | table_to_bkey(t, j) <= k) |
744 | j++; | 744 | j++; |
745 | 745 | ||
746 | /* Adjust all the lookup table entries, and find a new key for any that | 746 | /* Adjust all the lookup table entries, and find a new key for any that |
@@ -755,7 +755,7 @@ static void bch_bset_fix_lookup_table(struct btree_keys *b, | |||
755 | while (k < cacheline_to_bkey(t, j, 0)) | 755 | while (k < cacheline_to_bkey(t, j, 0)) |
756 | k = bkey_next(k); | 756 | k = bkey_next(k); |
757 | 757 | ||
758 | t->prev[j] = bkey_to_cacheline_offset(k); | 758 | t->prev[j] = bkey_to_cacheline_offset(t, j, k); |
759 | } | 759 | } |
760 | } | 760 | } |
761 | 761 | ||
@@ -768,7 +768,7 @@ static void bch_bset_fix_lookup_table(struct btree_keys *b, | |||
768 | k != bset_bkey_last(t->data); | 768 | k != bset_bkey_last(t->data); |
769 | k = bkey_next(k)) | 769 | k = bkey_next(k)) |
770 | if (t->size == bkey_to_cacheline(t, k)) { | 770 | if (t->size == bkey_to_cacheline(t, k)) { |
771 | t->prev[t->size] = bkey_to_cacheline_offset(k); | 771 | t->prev[t->size] = bkey_to_cacheline_offset(t, t->size, k); |
772 | t->size++; | 772 | t->size++; |
773 | } | 773 | } |
774 | } | 774 | } |