author     Joe Thornber <ejt@redhat.com>      2016-09-15 08:45:44 -0400
committer  Mike Snitzer <snitzer@redhat.com>  2016-09-22 11:12:23 -0400
commit     dd6a77d99859ab963503e67372ed278fe8ceab26 (patch)
tree       8cf6068e462644ecb1cbbd91c5abf3fd570fa887 /drivers/md/persistent-data/dm-array.c
parent     b88efd43f900d608560211a18a38d450f8192948 (diff)
dm array: add dm_array_new()
dm_array_new() creates a new, populated array more efficiently than
starting with an empty one and resizing.
Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
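
For context, a minimal usage sketch of the new call, based only on the signatures visible in the diff below. The value_fn prototype is inferred from the fn(base + i, element_at(info, ab, i), context) call site in populate_ablock_with_values(), and names such as zero_fill_value, info, new_root and nr_entries are illustrative placeholders, not kernel code. It assumes a caller that already includes "dm-array.h" and has a configured struct dm_array_info.

        /*
         * Illustrative sketch only -- not part of the patch.  A trivial
         * value_fn that zero-fills each little-endian 64-bit entry.
         */
        static int zero_fill_value(uint32_t index, void *value_le, void *context)
        {
                __le64 *v = value_le;

                *v = cpu_to_le64(0);            /* every entry starts at zero */
                return 0;
        }

        ...

        /*
         * Build a fully populated array of nr_entries in one pass; 'info' is
         * a dm_array_info set up by the caller, 'new_root' receives the root
         * of the new array.
         */
        r = dm_array_new(info, &new_root, nr_entries, zero_fill_value, NULL);
        if (r)
                return r;

The point of the patch is that this single call replaces the dm_array_empty() plus resize sequence a caller would otherwise need to build a pre-populated array.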
Diffstat (limited to 'drivers/md/persistent-data/dm-array.c')
-rw-r--r--   drivers/md/persistent-data/dm-array.c   142
1 file changed, 111 insertions(+), 31 deletions(-)
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
index 431a03067d64..155180954cdf 100644
--- a/drivers/md/persistent-data/dm-array.c
+++ b/drivers/md/persistent-data/dm-array.c
@@ -277,6 +277,48 @@ static int insert_ablock(struct dm_array_info *info, uint64_t index,
         return dm_btree_insert(&info->btree_info, *root, &index, &block_le, root);
 }
 
+/*----------------------------------------------------------------*/
+
+static int __shadow_ablock(struct dm_array_info *info, dm_block_t b,
+                           struct dm_block **block, struct array_block **ab)
+{
+        int inc;
+        int r = dm_tm_shadow_block(info->btree_info.tm, b,
+                                   &array_validator, block, &inc);
+        if (r)
+                return r;
+
+        *ab = dm_block_data(*block);
+        if (inc)
+                inc_ablock_entries(info, *ab);
+
+        return 0;
+}
+
+/*
+ * The shadow op will often be a noop.  Only insert if it really
+ * copied data.
+ */
+static int __reinsert_ablock(struct dm_array_info *info, unsigned index,
+                             struct dm_block *block, dm_block_t b,
+                             dm_block_t *root)
+{
+        int r = 0;
+
+        if (dm_block_location(block) != b) {
+                /*
+                 * dm_tm_shadow_block will have already decremented the old
+                 * block, but it is still referenced by the btree.  We
+                 * increment to stop the insert decrementing it below zero
+                 * when overwriting the old value.
+                 */
+                dm_tm_inc(info->btree_info.tm, b);
+                r = insert_ablock(info, index, block, root);
+        }
+
+        return r;
+}
+
 /*
  * Looks up an array block in the btree.  Then shadows it, and updates the
  * btree to point to this new shadow.  'root' is an input/output parameter
@@ -286,49 +328,21 @@ static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
                          unsigned index, struct dm_block **block,
                          struct array_block **ab)
 {
-        int r, inc;
+        int r;
         uint64_t key = index;
         dm_block_t b;
         __le64 block_le;
 
-        /*
-         * lookup
-         */
         r = dm_btree_lookup(&info->btree_info, *root, &key, &block_le);
         if (r)
                 return r;
         b = le64_to_cpu(block_le);
 
-        /*
-         * shadow
-         */
-        r = dm_tm_shadow_block(info->btree_info.tm, b,
-                               &array_validator, block, &inc);
+        r = __shadow_ablock(info, b, block, ab);
         if (r)
                 return r;
 
-        *ab = dm_block_data(*block);
-        if (inc)
-                inc_ablock_entries(info, *ab);
-
-        /*
-         * Reinsert.
-         *
-         * The shadow op will often be a noop.  Only insert if it really
-         * copied data.
-         */
-        if (dm_block_location(*block) != b) {
-                /*
-                 * dm_tm_shadow_block will have already decremented the old
-                 * block, but it is still referenced by the btree.  We
-                 * increment to stop the insert decrementing it below zero
-                 * when overwriting the old value.
-                 */
-                dm_tm_inc(info->btree_info.tm, b);
-                r = insert_ablock(info, index, *block, root);
-        }
-
-        return r;
+        return __reinsert_ablock(info, index, *block, b, root);
 }
 
 /*
@@ -681,6 +695,72 @@ int dm_array_resize(struct dm_array_info *info, dm_block_t root,
 }
 EXPORT_SYMBOL_GPL(dm_array_resize);
 
+static int populate_ablock_with_values(struct dm_array_info *info, struct array_block *ab,
+                                       value_fn fn, void *context, unsigned base, unsigned new_nr)
+{
+        int r;
+        unsigned i;
+        uint32_t nr_entries;
+        struct dm_btree_value_type *vt = &info->value_type;
+
+        BUG_ON(le32_to_cpu(ab->nr_entries));
+        BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
+
+        nr_entries = le32_to_cpu(ab->nr_entries);
+        for (i = 0; i < new_nr; i++) {
+                r = fn(base + i, element_at(info, ab, i), context);
+                if (r)
+                        return r;
+
+                if (vt->inc)
+                        vt->inc(vt->context, element_at(info, ab, i));
+        }
+
+        ab->nr_entries = cpu_to_le32(new_nr);
+        return 0;
+}
+
+int dm_array_new(struct dm_array_info *info, dm_block_t *root,
+                 uint32_t size, value_fn fn, void *context)
+{
+        int r;
+        struct dm_block *block;
+        struct array_block *ab;
+        unsigned block_index, end_block, size_of_block, max_entries;
+
+        r = dm_array_empty(info, root);
+        if (r)
+                return r;
+
+        size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
+        max_entries = calc_max_entries(info->value_type.size, size_of_block);
+        end_block = dm_div_up(size, max_entries);
+
+        for (block_index = 0; block_index != end_block; block_index++) {
+                r = alloc_ablock(info, size_of_block, max_entries, &block, &ab);
+                if (r)
+                        break;
+
+                r = populate_ablock_with_values(info, ab, fn, context,
+                                                block_index * max_entries,
+                                                min(max_entries, size));
+                if (r) {
+                        unlock_ablock(info, block);
+                        break;
+                }
+
+                r = insert_ablock(info, block_index, block, root);
+                unlock_ablock(info, block);
+                if (r)
+                        break;
+
+                size -= max_entries;
+        }
+
+        return r;
+}
+EXPORT_SYMBOL_GPL(dm_array_new);
+
 int dm_array_del(struct dm_array_info *info, dm_block_t root)
 {
         return dm_btree_del(&info->btree_info, root);
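
As a rough illustration of the sizing arithmetic in dm_array_new() above: dm_div_up(size, max_entries) gives the number of array blocks, and min(max_entries, size) together with the trailing size -= max_entries fills every block completely except possibly the last. The standalone sketch below uses made-up numbers (max_entries = 100, size = 250, so three blocks of 100, 100 and 50 entries); it only mimics the loop's counting, not the on-disk operations.

        #include <stdio.h>

        /* Same rounding-up division as the kernel's dm_div_up(). */
        static unsigned div_up(unsigned n, unsigned d)
        {
                return (n + d - 1) / d;
        }

        int main(void)
        {
                unsigned size = 250;            /* illustrative: total entries requested */
                unsigned max_entries = 100;     /* illustrative: entries per array block */
                unsigned end_block = div_up(size, max_entries);
                unsigned block_index;

                for (block_index = 0; block_index != end_block; block_index++) {
                        unsigned this_block = size < max_entries ? size : max_entries;

                        printf("ablock %u: %u entries\n", block_index, this_block);
                        size -= max_entries;    /* wraps on the last pass; loop exits anyway */
                }
                return 0;                       /* prints 100, 100, 50 */
        }

Note that on the final pass size -= max_entries wraps the unsigned counter, but block_index has already reached end_block, so the stale value is never used.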