Diffstat (limited to 'drivers/md/bcache/bset.h')
-rw-r--r--	drivers/md/bcache/bset.h	247
1 file changed, 114 insertions, 133 deletions
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index 4f60c21c7a38..ab31f3fc8d43 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -144,22 +144,11 @@
  * first key in that range of bytes again.
  */
 
-struct cache_set;
-
-/* Btree key comparison/iteration */
+struct btree;
+struct bkey_float;
 
 #define MAX_BSETS	4U
 
-struct btree_iter {
-	size_t size, used;
-#ifdef CONFIG_BCACHE_DEBUG
-	struct btree *b;
-#endif
-	struct btree_iter_set {
-		struct bkey *k, *end;
-	} data[MAX_BSETS];
-};
-
 struct bset_tree {
 	/*
 	 * We construct a binary tree in an array as if the array
@@ -169,14 +158,14 @@ struct bset_tree {
 	 */
 
 	/* size of the binary tree and prev array */
 	unsigned size;
 
 	/* function of size - precalculated for to_inorder() */
 	unsigned extra;
 
 	/* copy of the last key in the set */
 	struct bkey end;
 	struct bkey_float *tree;
 
 	/*
 	 * The nodes in the bset tree point to specific keys - this
@@ -186,12 +175,61 @@ struct bset_tree {
 	 * to keep bkey_float to 4 bytes and prev isn't used in the fast
 	 * path.
 	 */
 	uint8_t *prev;
 
 	/* The actual btree node, with pointers to each sorted set */
 	struct bset *data;
 };
 
+#define __set_bytes(i, k)	(sizeof(*(i)) + (k) * sizeof(uint64_t))
+#define set_bytes(i)		__set_bytes(i, i->keys)
+
+#define __set_blocks(i, k, block_bytes)				\
+	DIV_ROUND_UP(__set_bytes(i, k), block_bytes)
+#define set_blocks(i, block_bytes)				\
+	__set_blocks(i, (i)->keys, block_bytes)
+
+void bch_btree_keys_free(struct btree *);
+int bch_btree_keys_alloc(struct btree *, unsigned, gfp_t);
+
+void bch_bset_fix_invalidated_key(struct btree *, struct bkey *);
+void bch_bset_init_next(struct btree *, struct bset *, uint64_t);
+void bch_bset_insert(struct btree *, struct bkey *, struct bkey *);
+
+/* Btree key iteration */
+
+struct btree_iter {
+	size_t size, used;
+#ifdef CONFIG_BCACHE_DEBUG
+	struct btree *b;
+#endif
+	struct btree_iter_set {
+		struct bkey *k, *end;
+	} data[MAX_BSETS];
+};
+
+typedef bool (*ptr_filter_fn)(struct btree *, const struct bkey *);
+
+struct bkey *bch_btree_iter_next(struct btree_iter *);
+struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
+					struct btree *, ptr_filter_fn);
+
+void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *);
+struct bkey *bch_btree_iter_init(struct btree *, struct btree_iter *,
+				 struct bkey *);
+
+struct bkey *__bch_bset_search(struct btree *, struct bset_tree *,
+			       const struct bkey *);
+
+/*
+ * Returns the first key that is strictly greater than search
+ */
+static inline struct bkey *bch_bset_search(struct btree *b, struct bset_tree *t,
+					   const struct bkey *search)
+{
+	return search ? __bch_bset_search(b, t, search) : t->data->start;
+}
+
 /* Sorting */
 
 struct bset_sort_state {
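The __set_bytes()/__set_blocks() macros added in the hunk above size a sorted set as its header plus one 64-bit word per key, rounded up to whole blocks. A minimal user-space sketch of that arithmetic follows; the struct bset header below is only a stand-in modeled on the on-disk layout, and the 4096-byte block size and 500-key count are assumed purely for illustration.

/*
 * Stand-in sketch of the __set_bytes()/__set_blocks() arithmetic.
 * The real struct bset and DIV_ROUND_UP() come from the bcache/kernel
 * headers; this header layout and the block size are assumptions.
 */
#include <stdint.h>
#include <stdio.h>

struct bset {
	uint64_t csum, magic, seq;	/* hypothetical 32-byte header */
	uint32_t version, keys;		/* keys = u64s of key data */
	uint64_t d[];			/* key data follows the header */
};

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

#define __set_bytes(i, k)	(sizeof(*(i)) + (k) * sizeof(uint64_t))
#define __set_blocks(i, k, block_bytes)				\
	DIV_ROUND_UP(__set_bytes(i, k), block_bytes)

int main(void)
{
	struct bset *i = NULL;		/* only used under sizeof() */
	unsigned keys = 500;		/* 500 u64s worth of keys */

	/* 32 + 500 * 8 = 4032 bytes -> fits in one 4096-byte block */
	printf("bytes  = %zu\n", __set_bytes(i, keys));
	printf("blocks = %zu\n", __set_blocks(i, keys, 4096));
	return 0;
}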
@@ -219,6 +257,60 @@ static inline void bch_btree_sort(struct btree *b,
 	bch_btree_sort_partial(b, 0, state);
 }
 
+/* Bkey utility code */
+
+#define bset_bkey_last(i)	bkey_idx((struct bkey *) (i)->d, (i)->keys)
+
+static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned idx)
+{
+	return bkey_idx(i->start, idx);
+}
+
+static inline void bkey_init(struct bkey *k)
+{
+	*k = ZERO_KEY;
+}
+
+static __always_inline int64_t bkey_cmp(const struct bkey *l,
+					const struct bkey *r)
+{
+	return unlikely(KEY_INODE(l) != KEY_INODE(r))
+		? (int64_t) KEY_INODE(l) - (int64_t) KEY_INODE(r)
+		: (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
+}
+
+void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
+			      unsigned);
+bool __bch_cut_front(const struct bkey *, struct bkey *);
+bool __bch_cut_back(const struct bkey *, struct bkey *);
+
+static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
+{
+	BUG_ON(bkey_cmp(where, k) > 0);
+	return __bch_cut_front(where, k);
+}
+
+static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
+{
+	BUG_ON(bkey_cmp(where, &START_KEY(k)) < 0);
+	return __bch_cut_back(where, k);
+}
+
+#define PRECEDING_KEY(_k)					\
+({								\
+	struct bkey *_ret = NULL;				\
+								\
+	if (KEY_INODE(_k) || KEY_OFFSET(_k)) {			\
+		_ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0);	\
+								\
+		if (!_ret->low)					\
+			_ret->high--;				\
+		_ret->low--;					\
+	}							\
+								\
+	_ret;							\
+})
+
 /* Keylists */
 
 struct keylist {
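bkey_cmp() added above orders keys by inode and only falls back to the offset on a tie, and PRECEDING_KEY() builds the key immediately before a given one by decrementing the key's low word with a borrow from the high word when the low word is zero. The self-contained sketch below mirrors that comparison and borrow on a simplified (inode, offset) pair rather than the real bit-packed struct bkey; struct kv, kv_cmp() and kv_preceding() are hypothetical names, and KEY(), KEY_INODE() and KEY_OFFSET() are bcache macros not reproduced here.

/*
 * Simplified sketch of the bkey_cmp()/PRECEDING_KEY() semantics.  The
 * real struct bkey packs the inode and flags into bitfields; here the
 * key is just an (inode, offset) pair so the ordering and the borrow
 * are easy to see.
 */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

struct kv { uint64_t inode, offset; };	/* stand-in for struct bkey */

/* Same shape as bkey_cmp(): inode first, offset only as a tie-breaker */
static int64_t kv_cmp(struct kv l, struct kv r)
{
	return l.inode != r.inode
		? (int64_t)l.inode - (int64_t)r.inode
		: (int64_t)l.offset - (int64_t)r.offset;
}

/*
 * Same borrow as PRECEDING_KEY(): step back by one over the combined
 * (inode:offset) value; returns 0 if nothing precedes (0, 0).
 */
static int kv_preceding(struct kv k, struct kv *ret)
{
	if (!k.inode && !k.offset)
		return 0;

	if (!k.offset)
		k.inode--;		/* borrow from the high part */
	k.offset--;			/* offset wraps to ~0 on borrow */
	*ret = k;
	return 1;
}

int main(void)
{
	struct kv k = { .inode = 5, .offset = 0 }, prev;

	assert(kv_preceding(k, &prev));
	/* preceding (5, 0) is (4, UINT64_MAX): strictly less than k */
	assert(kv_cmp(prev, k) < 0);
	printf("prev = (%llu, %llu)\n",
	       (unsigned long long)prev.inode,
	       (unsigned long long)prev.offset);
	return 0;
}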
@@ -282,126 +374,15 @@ struct bkey *bch_keylist_pop(struct keylist *);
 void bch_keylist_pop_front(struct keylist *);
 int __bch_keylist_realloc(struct keylist *, unsigned);
 
-/* Bkey utility code */
-
-#define bset_bkey_last(i)	bkey_idx((struct bkey *) (i)->d, (i)->keys)
-
-static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned idx)
-{
-	return bkey_idx(i->start, idx);
-}
-
-static inline void bkey_init(struct bkey *k)
-{
-	*k = ZERO_KEY;
-}
-
-static __always_inline int64_t bkey_cmp(const struct bkey *l,
-					const struct bkey *r)
-{
-	return unlikely(KEY_INODE(l) != KEY_INODE(r))
-		? (int64_t) KEY_INODE(l) - (int64_t) KEY_INODE(r)
-		: (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
-}
-
-void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
-			      unsigned);
-bool __bch_cut_front(const struct bkey *, struct bkey *);
-bool __bch_cut_back(const struct bkey *, struct bkey *);
-
-static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
-{
-	BUG_ON(bkey_cmp(where, k) > 0);
-	return __bch_cut_front(where, k);
-}
-
-static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
-{
-	BUG_ON(bkey_cmp(where, &START_KEY(k)) < 0);
-	return __bch_cut_back(where, k);
-}
-
+struct cache_set;
 const char *bch_ptr_status(struct cache_set *, const struct bkey *);
 bool bch_btree_ptr_invalid(struct cache_set *, const struct bkey *);
 bool bch_extent_ptr_invalid(struct cache_set *, const struct bkey *);
+bool bch_btree_ptr_bad(struct btree *, const struct bkey *);
+bool bch_extent_ptr_bad(struct btree *, const struct bkey *);
 
 bool bch_ptr_bad(struct btree *, const struct bkey *);
 
-typedef bool (*ptr_filter_fn)(struct btree *, const struct bkey *);
-
-struct bkey *bch_btree_iter_next(struct btree_iter *);
-struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
-					struct btree *, ptr_filter_fn);
-
-void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *);
-struct bkey *bch_btree_iter_init(struct btree *, struct btree_iter *,
-				 struct bkey *);
-
-/* 32 bits total: */
-#define BKEY_MID_BITS		3
-#define BKEY_EXPONENT_BITS	7
-#define BKEY_MANTISSA_BITS	22
-#define BKEY_MANTISSA_MASK	((1 << BKEY_MANTISSA_BITS) - 1)
-
-struct bkey_float {
-	unsigned exponent:BKEY_EXPONENT_BITS;
-	unsigned m:BKEY_MID_BITS;
-	unsigned mantissa:BKEY_MANTISSA_BITS;
-} __packed;
-
-/*
- * BSET_CACHELINE was originally intended to match the hardware cacheline size -
- * it used to be 64, but I realized the lookup code would touch slightly less
- * memory if it was 128.
- *
- * It definites the number of bytes (in struct bset) per struct bkey_float in
- * the auxiliar search tree - when we're done searching the bset_float tree we
- * have this many bytes left that we do a linear search over.
- *
- * Since (after level 5) every level of the bset_tree is on a new cacheline,
- * we're touching one fewer cacheline in the bset tree in exchange for one more
- * cacheline in the linear search - but the linear search might stop before it
- * gets to the second cacheline.
- */
-
-#define BSET_CACHELINE		128
-#define bset_tree_space(b)	(btree_data_space(b) / BSET_CACHELINE)
-
-#define bset_tree_bytes(b)	(bset_tree_space(b) * sizeof(struct bkey_float))
-#define bset_prev_bytes(b)	(bset_tree_space(b) * sizeof(uint8_t))
-
-void bch_bset_init_next(struct btree *);
-
-void bch_bset_fix_invalidated_key(struct btree *, struct bkey *);
-void bch_bset_fix_lookup_table(struct btree *, struct bkey *);
-
-struct bkey *__bch_bset_search(struct btree *, struct bset_tree *,
-			       const struct bkey *);
-
-/*
- * Returns the first key that is strictly greater than search
- */
-static inline struct bkey *bch_bset_search(struct btree *b, struct bset_tree *t,
-					   const struct bkey *search)
-{
-	return search ? __bch_bset_search(b, t, search) : t->data->start;
-}
-
-#define PRECEDING_KEY(_k)					\
-({								\
-	struct bkey *_ret = NULL;				\
-								\
-	if (KEY_INODE(_k) || KEY_OFFSET(_k)) {			\
-		_ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0);	\
-								\
-		if (!_ret->low)					\
-			_ret->high--;				\
-		_ret->low--;					\
-	}							\
-								\
-	_ret;							\
-})
-
 bool bch_bkey_try_merge(struct btree *, struct bkey *, struct bkey *);
 
 int bch_bset_print_stats(struct cache_set *, char *);
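For reference, the BSET_CACHELINE/bset_tree_space() macros removed above, together with the 4-byte struct bkey_float, size the auxiliary search tree at one bkey_float and one prev byte per 128 bytes of bset data, with a short linear search left over at the end of each lookup. A small user-space sketch of that sizing follows; the 16 KiB BTREE_DATA_SPACE value is an assumption standing in for btree_data_space(), which is defined elsewhere in bcache, and the parameterless macro names are simplifications for the sketch.

/*
 * Sketch of the bset_tree_space()/bset_tree_bytes() sizing: one 4-byte
 * bkey_float plus one prev byte per BSET_CACHELINE bytes of key data.
 */
#include <stdint.h>
#include <stdio.h>

struct bkey_float {		/* 7 + 3 + 22 = 32 bits, as in the header */
	unsigned exponent:7;
	unsigned m:3;
	unsigned mantissa:22;
} __attribute__((packed));

#define BSET_CACHELINE		128
#define BTREE_DATA_SPACE	(16 * 1024)	/* assumed node key space */

#define bset_tree_space()	(BTREE_DATA_SPACE / BSET_CACHELINE)
#define bset_tree_bytes()	(bset_tree_space() * sizeof(struct bkey_float))
#define bset_prev_bytes()	(bset_tree_space() * sizeof(uint8_t))

int main(void)
{
	/* 16384 / 128 = 128 entries -> 512 bytes of tree + 128 prev bytes */
	printf("entries    = %d\n", bset_tree_space());
	printf("tree bytes = %zu\n", bset_tree_bytes());
	printf("prev bytes = %zu\n", bset_prev_bytes());
	return 0;
}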