Diffstat (limited to 'drivers/md/bcache/bset.c')
-rw-r--r--   drivers/md/bcache/bset.c   112
1 file changed, 60 insertions(+), 52 deletions(-)
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index f32216c75948..6bffde478926 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -106,6 +106,43 @@ bad:
         return true;
 }
 
+static bool ptr_bad_expensive_checks(struct btree *b, const struct bkey *k,
+                                     unsigned ptr)
+{
+        struct bucket *g = PTR_BUCKET(b->c, k, ptr);
+        char buf[80];
+
+        if (mutex_trylock(&b->c->bucket_lock)) {
+                if (b->level) {
+                        if (KEY_DIRTY(k) ||
+                            g->prio != BTREE_PRIO ||
+                            (b->c->gc_mark_valid &&
+                             GC_MARK(g) != GC_MARK_METADATA))
+                                goto err;
+
+                } else {
+                        if (g->prio == BTREE_PRIO)
+                                goto err;
+
+                        if (KEY_DIRTY(k) &&
+                            b->c->gc_mark_valid &&
+                            GC_MARK(g) != GC_MARK_DIRTY)
+                                goto err;
+                }
+                mutex_unlock(&b->c->bucket_lock);
+        }
+
+        return false;
+err:
+        mutex_unlock(&b->c->bucket_lock);
+        bch_bkey_to_text(buf, sizeof(buf), k);
+        btree_bug(b,
+"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
+                  buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
+                  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
+        return true;
+}
+
 bool bch_ptr_bad(struct btree *b, const struct bkey *k)
 {
         struct bucket *g;
@@ -133,46 +170,12 @@ bool bch_ptr_bad(struct btree *b, const struct bkey *k)
                 if (stale)
                         return true;
 
-#ifdef CONFIG_BCACHE_EDEBUG
-                if (!mutex_trylock(&b->c->bucket_lock))
-                        continue;
-
-                if (b->level) {
-                        if (KEY_DIRTY(k) ||
-                            g->prio != BTREE_PRIO ||
-                            (b->c->gc_mark_valid &&
-                             GC_MARK(g) != GC_MARK_METADATA))
-                                goto bug;
-
-                } else {
-                        if (g->prio == BTREE_PRIO)
-                                goto bug;
-
-                        if (KEY_DIRTY(k) &&
-                            b->c->gc_mark_valid &&
-                            GC_MARK(g) != GC_MARK_DIRTY)
-                                goto bug;
-                }
-                mutex_unlock(&b->c->bucket_lock);
-#endif
+                if (expensive_debug_checks(b->c) &&
+                    ptr_bad_expensive_checks(b, k, i))
+                        return true;
         }
 
         return false;
-#ifdef CONFIG_BCACHE_EDEBUG
-bug:
-        mutex_unlock(&b->c->bucket_lock);
-
-        {
-                char buf[80];
-
-                bch_bkey_to_text(buf, sizeof(buf), k);
-                btree_bug(b,
-"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
-                          buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
-                          g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
-        }
-        return true;
-#endif
 }
 
 /* Key/pointer manipulation */
@@ -821,16 +824,16 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
         } else
                 i = bset_search_write_set(b, t, search);
 
-#ifdef CONFIG_BCACHE_EDEBUG
-        BUG_ON(bset_written(b, t) &&
-               i.l != t->data->start &&
-               bkey_cmp(tree_to_prev_bkey(t,
-                  inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
-                        search) > 0);
+        if (expensive_debug_checks(b->c)) {
+                BUG_ON(bset_written(b, t) &&
+                       i.l != t->data->start &&
+                       bkey_cmp(tree_to_prev_bkey(t,
+                          inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
+                                search) > 0);
 
-        BUG_ON(i.r != end(t->data) &&
-               bkey_cmp(i.r, search) <= 0);
-#endif
+                BUG_ON(i.r != end(t->data) &&
+                       bkey_cmp(i.r, search) <= 0);
+        }
 
         while (likely(i.l != i.r) &&
                bkey_cmp(i.l, search) <= 0)
@@ -871,12 +874,16 @@ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
 }
 
 struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
                                    struct bkey *search, struct bset_tree *start)
 {
         struct bkey *ret = NULL;
         iter->size = ARRAY_SIZE(iter->data);
         iter->used = 0;
 
+#ifdef CONFIG_BCACHE_DEBUG
+        iter->b = b;
+#endif
+
         for (; start <= &b->sets[b->nsets]; start++) {
                 ret = bch_bset_search(b, start, search);
                 bch_btree_iter_push(iter, ret, end(start->data));
@@ -891,6 +898,8 @@ struct bkey *bch_btree_iter_next(struct btree_iter *iter)
         struct bkey *ret = NULL;
 
         if (!btree_iter_end(iter)) {
+                bch_btree_iter_next_check(iter);
+
                 ret = iter->data->k;
                 iter->data->k = bkey_next(iter->data->k);
 
@@ -1002,7 +1011,6 @@ static void btree_mergesort(struct btree *b, struct bset *out,
         out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;
 
         pr_debug("sorted %i keys", out->keys);
-        bch_check_key_order(b, out);
 }
 
 static void __btree_sort(struct btree *b, struct btree_iter *iter,
@@ -1063,15 +1071,15 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter,
 
 void bch_btree_sort_partial(struct btree *b, unsigned start)
 {
-        size_t oldsize = 0, order = b->page_order, keys = 0;
+        size_t order = b->page_order, keys = 0;
         struct btree_iter iter;
+        int oldsize = bch_count_data(b);
+
         __bch_btree_iter_init(b, &iter, NULL, &b->sets[start]);
 
         BUG_ON(b->sets[b->nsets].data == write_block(b) &&
                (b->sets[b->nsets].size || b->nsets));
 
-        if (b->written)
-                oldsize = bch_count_data(b);
 
         if (start) {
                 unsigned i;
@@ -1087,7 +1095,7 @@ void bch_btree_sort_partial(struct btree *b, unsigned start)
 
         __btree_sort(b, &iter, start, order, false);
 
-        EBUG_ON(b->written && bch_count_data(b) != oldsize);
+        EBUG_ON(b->written && oldsize >= 0 && bch_count_data(b) != oldsize);
 }
 
 void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter)