author     Li Zefan <lizf@cn.fujitsu.com>  2011-03-29 01:46:06 -0400
committer  Li Zefan <lizf@cn.fujitsu.com>  2011-04-25 04:46:03 -0400
commit     34d52cb6c50b5a43901709998f59fb1c5a43dc4a
tree       151c61795cceefc97e48e8209dc36303274fbe10
parent     f38b6e754d8cc4605ac21d9c1094d569d88b163b
Btrfs: Make free space cache code generic
This lets us re-use the code to cache free inode numbers.

The change is quite straightforward. Two new structures are introduced
(both are sketched below):

- struct btrfs_free_space_ctl

  The variables used for caching free space move from
  struct btrfs_block_group_cache into this new struct.

- struct btrfs_free_space_op

  Block-group-specific work (e.g. calculating the extents threshold)
  is done through functions registered in this struct.

The generic cache operations then take a struct btrfs_free_space_ctl
instead of a struct btrfs_block_group_cache.
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
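
The two new structures themselves land in fs/btrfs/free-space-cache.h; that
hunk is counted in the diffstat below but is not reproduced in this excerpt.
As a sketch, reconstructed from how the fields are used in the hunks that
follow (the exact types and field order in the header may differ):

	struct btrfs_free_space_ctl {
		spinlock_t tree_lock;		/* was block_group->tree_lock */
		struct rb_root free_space_offset; /* extents + bitmaps, by offset */
		u64 free_space;			/* total cached free bytes */
		int extents_thresh;		/* when to start using bitmaps */
		int free_extents;
		int total_bitmaps;
		int unit;			/* granularity; sectorsize here */
		u64 start;			/* base; key.objectid here */
		struct btrfs_free_space_op *op;	/* owner-specific callbacks */
		void *private;			/* the owning block group here */
	};

	struct btrfs_free_space_op {
		void (*recalc_thresholds)(struct btrfs_free_space_ctl *ctl);
		bool (*use_bitmap)(struct btrfs_free_space_ctl *ctl,
				   struct btrfs_free_space *info);
	};

All of the generic operations below take the ctl; only the two registered
callbacks reach back to the block group, via ctl->private.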
 fs/btrfs/ctree.h            |   7
 fs/btrfs/extent-tree.c      |  37
 fs/btrfs/free-space-cache.c | 430
 fs/btrfs/free-space-cache.h |  20
 4 files changed, 271 insertions(+), 223 deletions(-)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 2e61fe1b6b8c..d17e4a3b8bf7 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -830,9 +830,6 @@ struct btrfs_block_group_cache {
 	u64 bytes_super;
 	u64 flags;
 	u64 sectorsize;
-	int extents_thresh;
-	int free_extents;
-	int total_bitmaps;
 	unsigned int ro:1;
 	unsigned int dirty:1;
 	unsigned int iref:1;
@@ -847,9 +844,7 @@ struct btrfs_block_group_cache {
 	struct btrfs_space_info *space_info;
 
 	/* free space cache stuff */
-	spinlock_t tree_lock;
-	struct rb_root free_space_offset;
-	u64 free_space;
+	struct btrfs_free_space_ctl *free_space_ctl;
 
 	/* block group cache stuff */
 	struct rb_node cache_node;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 31f33ba56fe8..904eae10ec65 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -105,6 +105,7 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
 		WARN_ON(cache->pinned > 0);
 		WARN_ON(cache->reserved > 0);
 		WARN_ON(cache->reserved_pinned > 0);
+		kfree(cache->free_space_ctl);
 		kfree(cache);
 	}
 }
@@ -4893,7 +4894,7 @@ wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
 		return 0;
 
 	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
-		   (cache->free_space >= num_bytes));
+		   (cache->free_space_ctl->free_space >= num_bytes));
 
 	put_caching_control(caching_ctl);
 	return 0;
@@ -8551,10 +8552,16 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 			ret = -ENOMEM;
 			goto error;
 		}
+		cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
+						GFP_NOFS);
+		if (!cache->free_space_ctl) {
+			kfree(cache);
+			ret = -ENOMEM;
+			goto error;
+		}
 
 		atomic_set(&cache->count, 1);
 		spin_lock_init(&cache->lock);
-		spin_lock_init(&cache->tree_lock);
 		cache->fs_info = info;
 		INIT_LIST_HEAD(&cache->list);
 		INIT_LIST_HEAD(&cache->cluster_list);
@@ -8562,14 +8569,6 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 		if (need_clear)
 			cache->disk_cache_state = BTRFS_DC_CLEAR;
 
-		/*
-		 * we only want to have 32k of ram per block group for keeping
-		 * track of free space, and if we pass 1/2 of that we want to
-		 * start converting things over to using bitmaps
-		 */
-		cache->extents_thresh = ((1024 * 32) / 2) /
-			sizeof(struct btrfs_free_space);
-
 		read_extent_buffer(leaf, &cache->item,
 				   btrfs_item_ptr_offset(leaf, path->slots[0]),
 				   sizeof(cache->item));
@@ -8580,6 +8579,8 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 		cache->flags = btrfs_block_group_flags(&cache->item);
 		cache->sectorsize = root->sectorsize;
 
+		btrfs_init_free_space_ctl(cache);
+
 		/*
 		 * We need to exclude the super stripes now so that the space
 		 * info has super bytes accounted for, otherwise we'll think
@@ -8666,6 +8667,12 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 	cache = kzalloc(sizeof(*cache), GFP_NOFS);
 	if (!cache)
 		return -ENOMEM;
+	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
+					GFP_NOFS);
+	if (!cache->free_space_ctl) {
+		kfree(cache);
+		return -ENOMEM;
+	}
 
 	cache->key.objectid = chunk_offset;
 	cache->key.offset = size;
@@ -8673,19 +8680,13 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 	cache->sectorsize = root->sectorsize;
 	cache->fs_info = root->fs_info;
 
-	/*
-	 * we only want to have 32k of ram per block group for keeping track
-	 * of free space, and if we pass 1/2 of that we want to start
-	 * converting things over to using bitmaps
-	 */
-	cache->extents_thresh = ((1024 * 32) / 2) /
-		sizeof(struct btrfs_free_space);
 	atomic_set(&cache->count, 1);
 	spin_lock_init(&cache->lock);
-	spin_lock_init(&cache->tree_lock);
 	INIT_LIST_HEAD(&cache->list);
 	INIT_LIST_HEAD(&cache->cluster_list);
 
+	btrfs_init_free_space_ctl(cache);
+
 	btrfs_set_block_group_used(&cache->item, bytes_used);
 	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
 	cache->flags = type;
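
The payoff is that a second cache only needs its own btrfs_free_space_op.
The free-ino cache this patch prepares for is not part of this commit;
purely as an illustration (all names below are invented), a user caching
inode numbers rather than byte ranges could register something like:

	static void ino_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
	{
		/* no block-group geometry to consult; keep a fixed threshold */
		ctl->extents_thresh = ((32 * 1024) / 2) /
					sizeof(struct btrfs_free_space);
	}

	static bool ino_use_bitmap(struct btrfs_free_space_ctl *ctl,
				   struct btrfs_free_space *info)
	{
		/* with unit == 1, a bitmap covers inode numbers densely */
		return true;
	}

	static struct btrfs_free_space_op ino_op = {
		.recalc_thresholds	= ino_recalc_thresholds,
		.use_bitmap		= ino_use_bitmap,
	};

The generic code never needs to know what the offsets mean; it only calls
back through ctl->op.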
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 0e23bbabbba2..d4fb4f077a79 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -29,9 +29,7 @@
 #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
 #define MAX_CACHE_BYTES_PER_GIG (32 * 1024)
 
-static void recalculate_thresholds(struct btrfs_block_group_cache
-				   *block_group);
-static int link_free_space(struct btrfs_block_group_cache *block_group,
+static int link_free_space(struct btrfs_free_space_ctl *ctl,
 			   struct btrfs_free_space *info);
 
 struct inode *lookup_free_space_inode(struct btrfs_root *root,
@@ -212,6 +210,7 @@ static int readahead_cache(struct inode *inode)
 int load_free_space_cache(struct btrfs_fs_info *fs_info,
 			  struct btrfs_block_group_cache *block_group)
 {
+	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct btrfs_root *root = fs_info->tree_root;
 	struct inode *inode;
 	struct btrfs_free_space_header *header;
@@ -417,9 +416,9 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
 		}
 
 		if (entry->type == BTRFS_FREE_SPACE_EXTENT) {
-			spin_lock(&block_group->tree_lock);
-			ret = link_free_space(block_group, e);
-			spin_unlock(&block_group->tree_lock);
+			spin_lock(&ctl->tree_lock);
+			ret = link_free_space(ctl, e);
+			spin_unlock(&ctl->tree_lock);
 			BUG_ON(ret);
 		} else {
 			e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
@@ -431,11 +430,11 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
 				page_cache_release(page);
 				goto free_cache;
 			}
-			spin_lock(&block_group->tree_lock);
-			ret = link_free_space(block_group, e);
-			block_group->total_bitmaps++;
-			recalculate_thresholds(block_group);
-			spin_unlock(&block_group->tree_lock);
+			spin_lock(&ctl->tree_lock);
+			ret = link_free_space(ctl, e);
+			ctl->total_bitmaps++;
+			ctl->op->recalc_thresholds(ctl);
+			spin_unlock(&ctl->tree_lock);
 			list_add_tail(&e->list, &bitmaps);
 		}
 
@@ -471,16 +470,16 @@ next:
 		index++;
 	}
 
-	spin_lock(&block_group->tree_lock);
-	if (block_group->free_space != (block_group->key.offset - used -
-					block_group->bytes_super)) {
-		spin_unlock(&block_group->tree_lock);
+	spin_lock(&ctl->tree_lock);
+	if (ctl->free_space != (block_group->key.offset - used -
+				block_group->bytes_super)) {
+		spin_unlock(&ctl->tree_lock);
 		printk(KERN_ERR "block group %llu has an wrong amount of free "
 		       "space\n", block_group->key.objectid);
 		ret = 0;
 		goto free_cache;
 	}
-	spin_unlock(&block_group->tree_lock);
+	spin_unlock(&ctl->tree_lock);
 
 	ret = 1;
 out:
@@ -503,6 +502,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 			  struct btrfs_block_group_cache *block_group,
 			  struct btrfs_path *path)
 {
+	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct btrfs_free_space_header *header;
 	struct extent_buffer *leaf;
 	struct inode *inode;
@@ -546,7 +546,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 		return 0;
 	}
 
-	node = rb_first(&block_group->free_space_offset);
+	node = rb_first(&ctl->free_space_offset);
 	if (!node) {
 		iput(inode);
 		return 0;
@@ -851,30 +851,30 @@ out_free:
 	return ret;
 }
 
-static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize,
+static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
 					  u64 offset)
 {
 	BUG_ON(offset < bitmap_start);
 	offset -= bitmap_start;
-	return (unsigned long)(div64_u64(offset, sectorsize));
+	return (unsigned long)(div_u64(offset, unit));
 }
 
-static inline unsigned long bytes_to_bits(u64 bytes, u64 sectorsize)
+static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
 {
-	return (unsigned long)(div64_u64(bytes, sectorsize));
+	return (unsigned long)(div_u64(bytes, unit));
 }
 
-static inline u64 offset_to_bitmap(struct btrfs_block_group_cache *block_group,
+static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
 				   u64 offset)
 {
 	u64 bitmap_start;
 	u64 bytes_per_bitmap;
 
-	bytes_per_bitmap = BITS_PER_BITMAP * block_group->sectorsize;
-	bitmap_start = offset - block_group->key.objectid;
+	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
+	bitmap_start = offset - ctl->start;
 	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
 	bitmap_start *= bytes_per_bitmap;
-	bitmap_start += block_group->key.objectid;
+	bitmap_start += ctl->start;
 
 	return bitmap_start;
 }
@@ -932,10 +932,10 @@ static int tree_insert_offset(struct rb_root *root, u64 offset,
  * offset.
  */
 static struct btrfs_free_space *
-tree_search_offset(struct btrfs_block_group_cache *block_group,
+tree_search_offset(struct btrfs_free_space_ctl *ctl,
 		   u64 offset, int bitmap_only, int fuzzy)
 {
-	struct rb_node *n = block_group->free_space_offset.rb_node;
+	struct rb_node *n = ctl->free_space_offset.rb_node;
 	struct btrfs_free_space *entry, *prev = NULL;
 
 	/* find entry that is closest to the 'offset' */
@@ -1031,8 +1031,7 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
 				break;
 			}
 		}
-		if (entry->offset + BITS_PER_BITMAP *
-		    block_group->sectorsize > offset)
+		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
 			return entry;
 	} else if (entry->offset + entry->bytes > offset)
 		return entry;
@@ -1043,7 +1042,7 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
 	while (1) {
 		if (entry->bitmap) {
 			if (entry->offset + BITS_PER_BITMAP *
-			    block_group->sectorsize > offset)
+			    ctl->unit > offset)
 				break;
 		} else {
 			if (entry->offset + entry->bytes > offset)
@@ -1059,42 +1058,47 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
 }
 
 static inline void
-__unlink_free_space(struct btrfs_block_group_cache *block_group,
+__unlink_free_space(struct btrfs_free_space_ctl *ctl,
 		    struct btrfs_free_space *info)
 {
-	rb_erase(&info->offset_index, &block_group->free_space_offset);
-	block_group->free_extents--;
+	rb_erase(&info->offset_index, &ctl->free_space_offset);
+	ctl->free_extents--;
 }
 
-static void unlink_free_space(struct btrfs_block_group_cache *block_group,
+static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
 			      struct btrfs_free_space *info)
 {
-	__unlink_free_space(block_group, info);
-	block_group->free_space -= info->bytes;
+	__unlink_free_space(ctl, info);
+	ctl->free_space -= info->bytes;
 }
 
-static int link_free_space(struct btrfs_block_group_cache *block_group,
+static int link_free_space(struct btrfs_free_space_ctl *ctl,
 			   struct btrfs_free_space *info)
 {
 	int ret = 0;
 
 	BUG_ON(!info->bitmap && !info->bytes);
-	ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
+	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
 				 &info->offset_index, (info->bitmap != NULL));
 	if (ret)
 		return ret;
 
-	block_group->free_space += info->bytes;
-	block_group->free_extents++;
+	ctl->free_space += info->bytes;
+	ctl->free_extents++;
 	return ret;
 }
 
-static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
+static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
 {
+	struct btrfs_block_group_cache *block_group = ctl->private;
 	u64 max_bytes;
 	u64 bitmap_bytes;
 	u64 extent_bytes;
 	u64 size = block_group->key.offset;
+	u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
+	int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
+
+	BUG_ON(ctl->total_bitmaps > max_bitmaps);
 
 	/*
 	 * The goal is to keep the total amount of memory used per 1gb of space
@@ -1112,10 +1116,10 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
 	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
 	 * we add more bitmaps.
 	 */
-	bitmap_bytes = (block_group->total_bitmaps + 1) * PAGE_CACHE_SIZE;
+	bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;
 
 	if (bitmap_bytes >= max_bytes) {
-		block_group->extents_thresh = 0;
+		ctl->extents_thresh = 0;
 		return;
 	}
 
@@ -1126,43 +1130,43 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
 	extent_bytes = max_bytes - bitmap_bytes;
 	extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));
 
-	block_group->extents_thresh =
+	ctl->extents_thresh =
 		div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
 }
 
-static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group,
+static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
 			      struct btrfs_free_space *info, u64 offset,
 			      u64 bytes)
 {
 	unsigned long start, count;
 
-	start = offset_to_bit(info->offset, block_group->sectorsize, offset);
-	count = bytes_to_bits(bytes, block_group->sectorsize);
+	start = offset_to_bit(info->offset, ctl->unit, offset);
+	count = bytes_to_bits(bytes, ctl->unit);
 	BUG_ON(start + count > BITS_PER_BITMAP);
 
 	bitmap_clear(info->bitmap, start, count);
 
 	info->bytes -= bytes;
-	block_group->free_space -= bytes;
+	ctl->free_space -= bytes;
 }
 
-static void bitmap_set_bits(struct btrfs_block_group_cache *block_group,
+static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
 			    struct btrfs_free_space *info, u64 offset,
 			    u64 bytes)
 {
 	unsigned long start, count;
 
-	start = offset_to_bit(info->offset, block_group->sectorsize, offset);
-	count = bytes_to_bits(bytes, block_group->sectorsize);
+	start = offset_to_bit(info->offset, ctl->unit, offset);
+	count = bytes_to_bits(bytes, ctl->unit);
 	BUG_ON(start + count > BITS_PER_BITMAP);
 
 	bitmap_set(info->bitmap, start, count);
 
 	info->bytes += bytes;
-	block_group->free_space += bytes;
+	ctl->free_space += bytes;
 }
 
-static int search_bitmap(struct btrfs_block_group_cache *block_group,
+static int search_bitmap(struct btrfs_free_space_ctl *ctl,
 			 struct btrfs_free_space *bitmap_info, u64 *offset,
 			 u64 *bytes)
 {
@@ -1170,9 +1174,9 @@ static int search_bitmap(struct btrfs_block_group_cache *block_group,
 	unsigned long bits, i;
 	unsigned long next_zero;
 
-	i = offset_to_bit(bitmap_info->offset, block_group->sectorsize,
+	i = offset_to_bit(bitmap_info->offset, ctl->unit,
 			  max_t(u64, *offset, bitmap_info->offset));
-	bits = bytes_to_bits(*bytes, block_group->sectorsize);
+	bits = bytes_to_bits(*bytes, ctl->unit);
 
 	for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
 	     i < BITS_PER_BITMAP;
@@ -1187,29 +1191,25 @@ static int search_bitmap(struct btrfs_block_group_cache *block_group,
 	}
 
 	if (found_bits) {
-		*offset = (u64)(i * block_group->sectorsize) +
-			bitmap_info->offset;
-		*bytes = (u64)(found_bits) * block_group->sectorsize;
+		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
+		*bytes = (u64)(found_bits) * ctl->unit;
 		return 0;
 	}
 
 	return -1;
 }
 
-static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
-						*block_group, u64 *offset,
-						u64 *bytes, int debug)
+static struct btrfs_free_space *
+find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
 {
 	struct btrfs_free_space *entry;
 	struct rb_node *node;
 	int ret;
 
-	if (!block_group->free_space_offset.rb_node)
+	if (!ctl->free_space_offset.rb_node)
 		return NULL;
 
-	entry = tree_search_offset(block_group,
-				   offset_to_bitmap(block_group, *offset),
-				   0, 1);
+	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
 	if (!entry)
 		return NULL;
 
@@ -1219,7 +1219,7 @@ static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
 			continue;
 
 		if (entry->bitmap) {
-			ret = search_bitmap(block_group, entry, offset, bytes);
+			ret = search_bitmap(ctl, entry, offset, bytes);
 			if (!ret)
 				return entry;
 			continue;
@@ -1233,33 +1233,28 @@ static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
 	return NULL;
 }
 
-static void add_new_bitmap(struct btrfs_block_group_cache *block_group,
+static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
 			   struct btrfs_free_space *info, u64 offset)
 {
-	u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
-	int max_bitmaps = (int)div64_u64(block_group->key.offset +
-					 bytes_per_bg - 1, bytes_per_bg);
-	BUG_ON(block_group->total_bitmaps >= max_bitmaps);
-
-	info->offset = offset_to_bitmap(block_group, offset);
+	info->offset = offset_to_bitmap(ctl, offset);
 	info->bytes = 0;
-	link_free_space(block_group, info);
-	block_group->total_bitmaps++;
+	link_free_space(ctl, info);
+	ctl->total_bitmaps++;
 
-	recalculate_thresholds(block_group);
+	ctl->op->recalc_thresholds(ctl);
 }
 
-static void free_bitmap(struct btrfs_block_group_cache *block_group,
+static void free_bitmap(struct btrfs_free_space_ctl *ctl,
 			struct btrfs_free_space *bitmap_info)
 {
-	unlink_free_space(block_group, bitmap_info);
+	unlink_free_space(ctl, bitmap_info);
 	kfree(bitmap_info->bitmap);
 	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
-	block_group->total_bitmaps--;
-	recalculate_thresholds(block_group);
+	ctl->total_bitmaps--;
+	ctl->op->recalc_thresholds(ctl);
 }
 
-static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group,
+static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
 				       struct btrfs_free_space *bitmap_info,
 				       u64 *offset, u64 *bytes)
 {
@@ -1268,8 +1263,7 @@ static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_gro
 	int ret;
 
 again:
-	end = bitmap_info->offset +
-		(u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1;
+	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
 
 	/*
 	 * XXX - this can go away after a few releases.
@@ -1284,24 +1278,22 @@ again:
 	search_start = *offset;
 	search_bytes = *bytes;
 	search_bytes = min(search_bytes, end - search_start + 1);
-	ret = search_bitmap(block_group, bitmap_info, &search_start,
-			    &search_bytes);
+	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
 	BUG_ON(ret < 0 || search_start != *offset);
 
 	if (*offset > bitmap_info->offset && *offset + *bytes > end) {
-		bitmap_clear_bits(block_group, bitmap_info, *offset,
-				  end - *offset + 1);
+		bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
 		*bytes -= end - *offset + 1;
 		*offset = end + 1;
 	} else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
-		bitmap_clear_bits(block_group, bitmap_info, *offset, *bytes);
+		bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
 		*bytes = 0;
 	}
 
 	if (*bytes) {
 		struct rb_node *next = rb_next(&bitmap_info->offset_index);
 		if (!bitmap_info->bytes)
-			free_bitmap(block_group, bitmap_info);
+			free_bitmap(ctl, bitmap_info);
 
 		/*
 		 * no entry after this bitmap, but we still have bytes to
@@ -1328,31 +1320,28 @@ again:
 		 */
 		search_start = *offset;
 		search_bytes = *bytes;
-		ret = search_bitmap(block_group, bitmap_info, &search_start,
+		ret = search_bitmap(ctl, bitmap_info, &search_start,
 				    &search_bytes);
 		if (ret < 0 || search_start != *offset)
 			return -EAGAIN;
 
 		goto again;
 	} else if (!bitmap_info->bytes)
-		free_bitmap(block_group, bitmap_info);
+		free_bitmap(ctl, bitmap_info);
 
 	return 0;
 }
 
-static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
-			      struct btrfs_free_space *info)
+static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
+		       struct btrfs_free_space *info)
 {
-	struct btrfs_free_space *bitmap_info;
-	int added = 0;
-	u64 bytes, offset, end;
-	int ret;
+	struct btrfs_block_group_cache *block_group = ctl->private;
 
 	/*
 	 * If we are below the extents threshold then we can add this as an
 	 * extent, and don't have to deal with the bitmap
 	 */
-	if (block_group->free_extents < block_group->extents_thresh) {
+	if (ctl->free_extents < ctl->extents_thresh) {
 		/*
 		 * If this block group has some small extents we don't want to
 		 * use up all of our free slots in the cache with them, we want
@@ -1361,11 +1350,10 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
 		 * the overhead of a bitmap if we don't have to.
 		 */
 		if (info->bytes <= block_group->sectorsize * 4) {
-			if (block_group->free_extents * 2 <=
-			    block_group->extents_thresh)
-				return 0;
+			if (ctl->free_extents * 2 <= ctl->extents_thresh)
+				return false;
 		} else {
-			return 0;
+			return false;
 		}
 	}
 
@@ -1375,31 +1363,42 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
 	 */
 	if (BITS_PER_BITMAP * block_group->sectorsize >
 	    block_group->key.offset)
-		return 0;
+		return false;
+
+	return true;
+}
+
+static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
+			      struct btrfs_free_space *info)
+{
+	struct btrfs_free_space *bitmap_info;
+	int added = 0;
+	u64 bytes, offset, end;
+	int ret;
 
 	bytes = info->bytes;
 	offset = info->offset;
 
+	if (!ctl->op->use_bitmap(ctl, info))
+		return 0;
+
 again:
-	bitmap_info = tree_search_offset(block_group,
-					 offset_to_bitmap(block_group, offset),
+	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
 					 1, 0);
 	if (!bitmap_info) {
 		BUG_ON(added);
 		goto new_bitmap;
 	}
 
-	end = bitmap_info->offset +
-		(u64)(BITS_PER_BITMAP * block_group->sectorsize);
+	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
 
 	if (offset >= bitmap_info->offset && offset + bytes > end) {
-		bitmap_set_bits(block_group, bitmap_info, offset,
-				end - offset);
+		bitmap_set_bits(ctl, bitmap_info, offset, end - offset);
 		bytes -= end - offset;
 		offset = end;
 		added = 0;
 	} else if (offset >= bitmap_info->offset && offset + bytes <= end) {
-		bitmap_set_bits(block_group, bitmap_info, offset, bytes);
+		bitmap_set_bits(ctl, bitmap_info, offset, bytes);
 		bytes = 0;
 	} else {
 		BUG();
@@ -1413,19 +1412,19 @@ again:
 
 new_bitmap:
 	if (info && info->bitmap) {
-		add_new_bitmap(block_group, info, offset);
+		add_new_bitmap(ctl, info, offset);
 		added = 1;
 		info = NULL;
 		goto again;
 	} else {
-		spin_unlock(&block_group->tree_lock);
+		spin_unlock(&ctl->tree_lock);
 
 		/* no pre-allocated info, allocate a new one */
 		if (!info) {
 			info = kmem_cache_zalloc(btrfs_free_space_cachep,
 						 GFP_NOFS);
 			if (!info) {
-				spin_lock(&block_group->tree_lock);
+				spin_lock(&ctl->tree_lock);
 				ret = -ENOMEM;
 				goto out;
 			}
@@ -1433,7 +1432,7 @@ new_bitmap:
 
 		/* allocate the bitmap */
 		info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
-		spin_lock(&block_group->tree_lock);
+		spin_lock(&ctl->tree_lock);
 		if (!info->bitmap) {
 			ret = -ENOMEM;
 			goto out;
@@ -1451,7 +1450,7 @@ out:
 	return ret;
 }
 
-bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
+bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
 			  struct btrfs_free_space *info, bool update_stat)
 {
 	struct btrfs_free_space *left_info;
@@ -1465,18 +1464,18 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
 	 * are adding, if there is remove that struct and add a new one to
 	 * cover the entire range
 	 */
-	right_info = tree_search_offset(block_group, offset + bytes, 0, 0);
+	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
 	if (right_info && rb_prev(&right_info->offset_index))
 		left_info = rb_entry(rb_prev(&right_info->offset_index),
 				     struct btrfs_free_space, offset_index);
 	else
-		left_info = tree_search_offset(block_group, offset - 1, 0, 0);
+		left_info = tree_search_offset(ctl, offset - 1, 0, 0);
 
 	if (right_info && !right_info->bitmap) {
 		if (update_stat)
-			unlink_free_space(block_group, right_info);
+			unlink_free_space(ctl, right_info);
 		else
-			__unlink_free_space(block_group, right_info);
+			__unlink_free_space(ctl, right_info);
 		info->bytes += right_info->bytes;
 		kmem_cache_free(btrfs_free_space_cachep, right_info);
 		merged = true;
@@ -1485,9 +1484,9 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
 	if (left_info && !left_info->bitmap &&
 	    left_info->offset + left_info->bytes == offset) {
 		if (update_stat)
-			unlink_free_space(block_group, left_info);
+			unlink_free_space(ctl, left_info);
 		else
-			__unlink_free_space(block_group, left_info);
+			__unlink_free_space(ctl, left_info);
 		info->offset = left_info->offset;
 		info->bytes += left_info->bytes;
 		kmem_cache_free(btrfs_free_space_cachep, left_info);
@@ -1500,6 +1499,7 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
 int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 			 u64 offset, u64 bytes)
 {
+	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct btrfs_free_space *info;
 	int ret = 0;
 
@@ -1510,9 +1510,9 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 	info->offset = offset;
 	info->bytes = bytes;
 
-	spin_lock(&block_group->tree_lock);
+	spin_lock(&ctl->tree_lock);
 
-	if (try_merge_free_space(block_group, info, true))
+	if (try_merge_free_space(ctl, info, true))
 		goto link;
 
 	/*
@@ -1520,7 +1520,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 	 * extent then we know we're going to have to allocate a new extent, so
 	 * before we do that see if we need to drop this into a bitmap
 	 */
-	ret = insert_into_bitmap(block_group, info);
+	ret = insert_into_bitmap(ctl, info);
 	if (ret < 0) {
 		goto out;
 	} else if (ret) {
@@ -1528,11 +1528,11 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 		goto out;
 	}
 link:
-	ret = link_free_space(block_group, info);
+	ret = link_free_space(ctl, info);
 	if (ret)
 		kmem_cache_free(btrfs_free_space_cachep, info);
 out:
-	spin_unlock(&block_group->tree_lock);
+	spin_unlock(&ctl->tree_lock);
 
 	if (ret) {
 		printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
@@ -1545,21 +1545,21 @@ out:
 int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 			    u64 offset, u64 bytes)
 {
+	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct btrfs_free_space *info;
 	struct btrfs_free_space *next_info = NULL;
 	int ret = 0;
 
-	spin_lock(&block_group->tree_lock);
+	spin_lock(&ctl->tree_lock);
 
 again:
-	info = tree_search_offset(block_group, offset, 0, 0);
+	info = tree_search_offset(ctl, offset, 0, 0);
 	if (!info) {
 		/*
 		 * oops didn't find an extent that matched the space we wanted
 		 * to remove, look for a bitmap instead
 		 */
-		info = tree_search_offset(block_group,
-					  offset_to_bitmap(block_group, offset),
+		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
 					  1, 0);
 		if (!info) {
 			WARN_ON(1);
@@ -1574,8 +1574,8 @@ again:
 				     offset_index);
 
 		if (next_info->bitmap)
-			end = next_info->offset + BITS_PER_BITMAP *
-				block_group->sectorsize - 1;
+			end = next_info->offset +
+			      BITS_PER_BITMAP * ctl->unit - 1;
 		else
 			end = next_info->offset + next_info->bytes;
 
@@ -1595,20 +1595,20 @@ again:
 	}
 
 	if (info->bytes == bytes) {
-		unlink_free_space(block_group, info);
+		unlink_free_space(ctl, info);
 		if (info->bitmap) {
 			kfree(info->bitmap);
-			block_group->total_bitmaps--;
+			ctl->total_bitmaps--;
 		}
 		kmem_cache_free(btrfs_free_space_cachep, info);
 		goto out_lock;
 	}
 
 	if (!info->bitmap && info->offset == offset) {
-		unlink_free_space(block_group, info);
+		unlink_free_space(ctl, info);
 		info->offset += bytes;
 		info->bytes -= bytes;
-		link_free_space(block_group, info);
+		link_free_space(ctl, info);
 		goto out_lock;
 	}
 
@@ -1622,13 +1622,13 @@ again:
 		 * first unlink the old info and then
 		 * insert it again after the hole we're creating
 		 */
-		unlink_free_space(block_group, info);
+		unlink_free_space(ctl, info);
 		if (offset + bytes < info->offset + info->bytes) {
 			u64 old_end = info->offset + info->bytes;
 
 			info->offset = offset + bytes;
 			info->bytes = old_end - info->offset;
-			ret = link_free_space(block_group, info);
+			ret = link_free_space(ctl, info);
 			WARN_ON(ret);
 			if (ret)
 				goto out_lock;
@@ -1638,7 +1638,7 @@ again:
 			 */
 			kmem_cache_free(btrfs_free_space_cachep, info);
 		}
-		spin_unlock(&block_group->tree_lock);
+		spin_unlock(&ctl->tree_lock);
 
 		/* step two, insert a new info struct to cover
 		 * anything before the hole
@@ -1649,12 +1649,12 @@ again:
 			goto out;
 	}
 
-	ret = remove_from_bitmap(block_group, info, &offset, &bytes);
+	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
 	if (ret == -EAGAIN)
 		goto again;
 	BUG_ON(ret);
 out_lock:
-	spin_unlock(&block_group->tree_lock);
+	spin_unlock(&ctl->tree_lock);
 out:
 	return ret;
 }
@@ -1662,11 +1662,12 @@ out:
 void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
 			   u64 bytes)
 {
+	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct btrfs_free_space *info;
 	struct rb_node *n;
 	int count = 0;
 
-	for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) {
+	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
 		info = rb_entry(n, struct btrfs_free_space, offset_index);
 		if (info->bytes >= bytes)
 			count++;
@@ -1681,6 +1682,30 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
 	       "\n", count);
 }
 
+static struct btrfs_free_space_op free_space_op = {
+	.recalc_thresholds	= recalculate_thresholds,
+	.use_bitmap		= use_bitmap,
+};
+
+void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
+{
+	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+
+	spin_lock_init(&ctl->tree_lock);
+	ctl->unit = block_group->sectorsize;
+	ctl->start = block_group->key.objectid;
+	ctl->private = block_group;
+	ctl->op = &free_space_op;
+
+	/*
+	 * we only want to have 32k of ram per block group for keeping
+	 * track of free space, and if we pass 1/2 of that we want to
+	 * start converting things over to using bitmaps
+	 */
+	ctl->extents_thresh = ((1024 * 32) / 2) /
+				sizeof(struct btrfs_free_space);
+}
+
 /*
  * for a given cluster, put all of its extents back into the free
  * space cache. If the block group passed doesn't match the block group
@@ -1692,6 +1717,7 @@ __btrfs_return_cluster_to_free_space( | |||
1692 | struct btrfs_block_group_cache *block_group, | 1717 | struct btrfs_block_group_cache *block_group, |
1693 | struct btrfs_free_cluster *cluster) | 1718 | struct btrfs_free_cluster *cluster) |
1694 | { | 1719 | { |
1720 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1695 | struct btrfs_free_space *entry; | 1721 | struct btrfs_free_space *entry; |
1696 | struct rb_node *node; | 1722 | struct rb_node *node; |
1697 | 1723 | ||
@@ -1713,8 +1739,8 @@ __btrfs_return_cluster_to_free_space( | |||
1713 | 1739 | ||
1714 | bitmap = (entry->bitmap != NULL); | 1740 | bitmap = (entry->bitmap != NULL); |
1715 | if (!bitmap) | 1741 | if (!bitmap) |
1716 | try_merge_free_space(block_group, entry, false); | 1742 | try_merge_free_space(ctl, entry, false); |
1717 | tree_insert_offset(&block_group->free_space_offset, | 1743 | tree_insert_offset(&ctl->free_space_offset, |
1718 | entry->offset, &entry->offset_index, bitmap); | 1744 | entry->offset, &entry->offset_index, bitmap); |
1719 | } | 1745 | } |
1720 | cluster->root = RB_ROOT; | 1746 | cluster->root = RB_ROOT; |
@@ -1727,12 +1753,13 @@ out: | |||
1727 | 1753 | ||
1728 | void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) | 1754 | void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) |
1729 | { | 1755 | { |
1756 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1730 | struct btrfs_free_space *info; | 1757 | struct btrfs_free_space *info; |
1731 | struct rb_node *node; | 1758 | struct rb_node *node; |
1732 | struct btrfs_free_cluster *cluster; | 1759 | struct btrfs_free_cluster *cluster; |
1733 | struct list_head *head; | 1760 | struct list_head *head; |
1734 | 1761 | ||
1735 | spin_lock(&block_group->tree_lock); | 1762 | spin_lock(&ctl->tree_lock); |
1736 | while ((head = block_group->cluster_list.next) != | 1763 | while ((head = block_group->cluster_list.next) != |
1737 | &block_group->cluster_list) { | 1764 | &block_group->cluster_list) { |
1738 | cluster = list_entry(head, struct btrfs_free_cluster, | 1765 | cluster = list_entry(head, struct btrfs_free_cluster, |
@@ -1741,57 +1768,58 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) | |||
1741 | WARN_ON(cluster->block_group != block_group); | 1768 | WARN_ON(cluster->block_group != block_group); |
1742 | __btrfs_return_cluster_to_free_space(block_group, cluster); | 1769 | __btrfs_return_cluster_to_free_space(block_group, cluster); |
1743 | if (need_resched()) { | 1770 | if (need_resched()) { |
1744 | spin_unlock(&block_group->tree_lock); | 1771 | spin_unlock(&ctl->tree_lock); |
1745 | cond_resched(); | 1772 | cond_resched(); |
1746 | spin_lock(&block_group->tree_lock); | 1773 | spin_lock(&ctl->tree_lock); |
1747 | } | 1774 | } |
1748 | } | 1775 | } |
1749 | 1776 | ||
1750 | while ((node = rb_last(&block_group->free_space_offset)) != NULL) { | 1777 | while ((node = rb_last(&ctl->free_space_offset)) != NULL) { |
1751 | info = rb_entry(node, struct btrfs_free_space, offset_index); | 1778 | info = rb_entry(node, struct btrfs_free_space, offset_index); |
1752 | unlink_free_space(block_group, info); | 1779 | unlink_free_space(ctl, info); |
1753 | if (info->bitmap) | 1780 | if (info->bitmap) |
1754 | kfree(info->bitmap); | 1781 | kfree(info->bitmap); |
1755 | kmem_cache_free(btrfs_free_space_cachep, info); | 1782 | kmem_cache_free(btrfs_free_space_cachep, info); |
1756 | if (need_resched()) { | 1783 | if (need_resched()) { |
1757 | spin_unlock(&block_group->tree_lock); | 1784 | spin_unlock(&ctl->tree_lock); |
1758 | cond_resched(); | 1785 | cond_resched(); |
1759 | spin_lock(&block_group->tree_lock); | 1786 | spin_lock(&ctl->tree_lock); |
1760 | } | 1787 | } |
1761 | } | 1788 | } |
1762 | 1789 | ||
1763 | spin_unlock(&block_group->tree_lock); | 1790 | spin_unlock(&ctl->tree_lock); |
1764 | } | 1791 | } |
1765 | 1792 | ||
1766 | u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, | 1793 | u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, |
1767 | u64 offset, u64 bytes, u64 empty_size) | 1794 | u64 offset, u64 bytes, u64 empty_size) |
1768 | { | 1795 | { |
1796 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1769 | struct btrfs_free_space *entry = NULL; | 1797 | struct btrfs_free_space *entry = NULL; |
1770 | u64 bytes_search = bytes + empty_size; | 1798 | u64 bytes_search = bytes + empty_size; |
1771 | u64 ret = 0; | 1799 | u64 ret = 0; |
1772 | 1800 | ||
1773 | spin_lock(&block_group->tree_lock); | 1801 | spin_lock(&ctl->tree_lock); |
1774 | entry = find_free_space(block_group, &offset, &bytes_search, 0); | 1802 | entry = find_free_space(ctl, &offset, &bytes_search); |
1775 | if (!entry) | 1803 | if (!entry) |
1776 | goto out; | 1804 | goto out; |
1777 | 1805 | ||
1778 | ret = offset; | 1806 | ret = offset; |
1779 | if (entry->bitmap) { | 1807 | if (entry->bitmap) { |
1780 | bitmap_clear_bits(block_group, entry, offset, bytes); | 1808 | bitmap_clear_bits(ctl, entry, offset, bytes); |
1781 | if (!entry->bytes) | 1809 | if (!entry->bytes) |
1782 | free_bitmap(block_group, entry); | 1810 | free_bitmap(ctl, entry); |
1783 | } else { | 1811 | } else { |
1784 | unlink_free_space(block_group, entry); | 1812 | unlink_free_space(ctl, entry); |
1785 | entry->offset += bytes; | 1813 | entry->offset += bytes; |
1786 | entry->bytes -= bytes; | 1814 | entry->bytes -= bytes; |
1787 | if (!entry->bytes) | 1815 | if (!entry->bytes) |
1788 | kmem_cache_free(btrfs_free_space_cachep, entry); | 1816 | kmem_cache_free(btrfs_free_space_cachep, entry); |
1789 | else | 1817 | else |
1790 | link_free_space(block_group, entry); | 1818 | link_free_space(ctl, entry); |
1791 | } | 1819 | } |
1792 | 1820 | ||
1793 | out: | 1821 | out: |
1794 | spin_unlock(&block_group->tree_lock); | 1822 | spin_unlock(&ctl->tree_lock); |
1795 | 1823 | ||
1796 | return ret; | 1824 | return ret; |
1797 | } | 1825 | } |
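
The non-bitmap branch of btrfs_find_space_for_alloc() implements allocation as an in-place split of the extent entry: unlink it, advance the offset past the allocated range, and relink whatever remains. Condensed into a standalone helper (hypothetical name; the bitmap branch is omitted):

static u64 alloc_from_extent_entry(struct btrfs_free_space_ctl *ctl,
                                   struct btrfs_free_space *entry, u64 bytes)
{
        u64 ret = entry->offset;

        unlink_free_space(ctl, entry);          /* drops it from the rb-tree */
        entry->offset += bytes;
        entry->bytes -= bytes;
        if (!entry->bytes)                      /* fully consumed */
                kmem_cache_free(btrfs_free_space_cachep, entry);
        else
                link_free_space(ctl, entry);    /* re-insert the remainder */

        return ret;
}
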
@@ -1808,6 +1836,7 @@ int btrfs_return_cluster_to_free_space( | |||
1808 | struct btrfs_block_group_cache *block_group, | 1836 | struct btrfs_block_group_cache *block_group, |
1809 | struct btrfs_free_cluster *cluster) | 1837 | struct btrfs_free_cluster *cluster) |
1810 | { | 1838 | { |
1839 | struct btrfs_free_space_ctl *ctl; | ||
1811 | int ret; | 1840 | int ret; |
1812 | 1841 | ||
1813 | /* first, get a safe pointer to the block group */ | 1842 | /* first, get a safe pointer to the block group */ |
@@ -1826,10 +1855,12 @@ int btrfs_return_cluster_to_free_space( | |||
1826 | atomic_inc(&block_group->count); | 1855 | atomic_inc(&block_group->count); |
1827 | spin_unlock(&cluster->lock); | 1856 | spin_unlock(&cluster->lock); |
1828 | 1857 | ||
1858 | ctl = block_group->free_space_ctl; | ||
1859 | |||
1829 | /* now return any extents the cluster had on it */ | 1860 | /* now return any extents the cluster had on it */ |
1830 | spin_lock(&block_group->tree_lock); | 1861 | spin_lock(&ctl->tree_lock); |
1831 | ret = __btrfs_return_cluster_to_free_space(block_group, cluster); | 1862 | ret = __btrfs_return_cluster_to_free_space(block_group, cluster); |
1832 | spin_unlock(&block_group->tree_lock); | 1863 | spin_unlock(&ctl->tree_lock); |
1833 | 1864 | ||
1834 | /* finally drop our ref */ | 1865 | /* finally drop our ref */ |
1835 | btrfs_put_block_group(block_group); | 1866 | btrfs_put_block_group(block_group); |
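
Note the ordering in the hunk above: ctl is read only after the block group has been pinned with atomic_inc(&block_group->count). That matters now because the ctl is torn down together with the block group itself. The locked core of the function, as a shape (hypothetical helper name):

static int return_cluster_locked(struct btrfs_block_group_cache *block_group,
                                 struct btrfs_free_cluster *cluster)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        int ret;

        spin_lock(&ctl->tree_lock);
        ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
        spin_unlock(&ctl->tree_lock);
        return ret;
}
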
@@ -1841,6 +1872,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, | |||
1841 | struct btrfs_free_space *entry, | 1872 | struct btrfs_free_space *entry, |
1842 | u64 bytes, u64 min_start) | 1873 | u64 bytes, u64 min_start) |
1843 | { | 1874 | { |
1875 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1844 | int err; | 1876 | int err; |
1845 | u64 search_start = cluster->window_start; | 1877 | u64 search_start = cluster->window_start; |
1846 | u64 search_bytes = bytes; | 1878 | u64 search_bytes = bytes; |
@@ -1849,13 +1881,12 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, | |||
1849 | search_start = min_start; | 1881 | search_start = min_start; |
1850 | search_bytes = bytes; | 1882 | search_bytes = bytes; |
1851 | 1883 | ||
1852 | err = search_bitmap(block_group, entry, &search_start, | 1884 | err = search_bitmap(ctl, entry, &search_start, &search_bytes); |
1853 | &search_bytes); | ||
1854 | if (err) | 1885 | if (err) |
1855 | return 0; | 1886 | return 0; |
1856 | 1887 | ||
1857 | ret = search_start; | 1888 | ret = search_start; |
1858 | bitmap_clear_bits(block_group, entry, ret, bytes); | 1889 | bitmap_clear_bits(ctl, entry, ret, bytes); |
1859 | 1890 | ||
1860 | return ret; | 1891 | return ret; |
1861 | } | 1892 | } |
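
search_bitmap() and bitmap_clear_bits() are not shown in these hunks; conceptually, a free-space bitmap has one set bit per free unit, and allocating from it means finding a long-enough run of set bits and clearing it. A kernel-independent sketch of the search half (hypothetical helper, expressed at bit granularity; the real code converts between bytes and bits via ctl->unit):

#include <stdbool.h>
#include <stddef.h>

#define BITS_PER_WORD (8 * sizeof(unsigned long))

/* Find a run of `want` consecutive set bits at or after *start. */
static bool find_free_run(const unsigned long *map, size_t nbits,
                          size_t want, size_t *start)
{
        size_t i, run = 0;

        for (i = *start; i < nbits; i++) {
                if (map[i / BITS_PER_WORD] & (1UL << (i % BITS_PER_WORD))) {
                        if (++run == want) {
                                *start = i + 1 - want;  /* run begins here */
                                return true;
                        }
                } else {
                        run = 0;                        /* run broken */
                }
        }
        return false;
}
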
@@ -1869,6 +1900,7 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, | |||
1869 | struct btrfs_free_cluster *cluster, u64 bytes, | 1900 | struct btrfs_free_cluster *cluster, u64 bytes, |
1870 | u64 min_start) | 1901 | u64 min_start) |
1871 | { | 1902 | { |
1903 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1872 | struct btrfs_free_space *entry = NULL; | 1904 | struct btrfs_free_space *entry = NULL; |
1873 | struct rb_node *node; | 1905 | struct rb_node *node; |
1874 | u64 ret = 0; | 1906 | u64 ret = 0; |
@@ -1929,20 +1961,20 @@ out: | |||
1929 | if (!ret) | 1961 | if (!ret) |
1930 | return 0; | 1962 | return 0; |
1931 | 1963 | ||
1932 | spin_lock(&block_group->tree_lock); | 1964 | spin_lock(&ctl->tree_lock); |
1933 | 1965 | ||
1934 | block_group->free_space -= bytes; | 1966 | ctl->free_space -= bytes; |
1935 | if (entry->bytes == 0) { | 1967 | if (entry->bytes == 0) { |
1936 | block_group->free_extents--; | 1968 | ctl->free_extents--; |
1937 | if (entry->bitmap) { | 1969 | if (entry->bitmap) { |
1938 | kfree(entry->bitmap); | 1970 | kfree(entry->bitmap); |
1939 | block_group->total_bitmaps--; | 1971 | ctl->total_bitmaps--; |
1940 | recalculate_thresholds(block_group); | 1972 | ctl->op->recalc_thresholds(ctl); |
1941 | } | 1973 | } |
1942 | kmem_cache_free(btrfs_free_space_cachep, entry); | 1974 | kmem_cache_free(btrfs_free_space_cachep, entry); |
1943 | } | 1975 | } |
1944 | 1976 | ||
1945 | spin_unlock(&block_group->tree_lock); | 1977 | spin_unlock(&ctl->tree_lock); |
1946 | 1978 | ||
1947 | return ret; | 1979 | return ret; |
1948 | } | 1980 | } |
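
The hunk above contains the pivotal substitution of the patch: the direct call recalculate_thresholds(block_group) becomes ctl->op->recalc_thresholds(ctl), so the one remaining block-group-specific step in this path runs through the new op table. Presumably the block-group callbacks are registered with a static initializer along these lines (an assumption; the initializer itself is not in these hunks, and recalculate_thresholds() would be re-signatured to take the ctl):

/* Assumed registration of the block-group policy (not shown here). */
static struct btrfs_free_space_op free_space_op = {
        .recalc_thresholds      = recalculate_thresholds,
        .use_bitmap             = use_bitmap,
};
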
@@ -1952,6 +1984,7 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group, | |||
1952 | struct btrfs_free_cluster *cluster, | 1984 | struct btrfs_free_cluster *cluster, |
1953 | u64 offset, u64 bytes, u64 min_bytes) | 1985 | u64 offset, u64 bytes, u64 min_bytes) |
1954 | { | 1986 | { |
1987 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1955 | unsigned long next_zero; | 1988 | unsigned long next_zero; |
1956 | unsigned long i; | 1989 | unsigned long i; |
1957 | unsigned long search_bits; | 1990 | unsigned long search_bits; |
@@ -2006,7 +2039,7 @@ again: | |||
2006 | 2039 | ||
2007 | cluster->window_start = start * block_group->sectorsize + | 2040 | cluster->window_start = start * block_group->sectorsize + |
2008 | entry->offset; | 2041 | entry->offset; |
2009 | rb_erase(&entry->offset_index, &block_group->free_space_offset); | 2042 | rb_erase(&entry->offset_index, &ctl->free_space_offset); |
2010 | ret = tree_insert_offset(&cluster->root, entry->offset, | 2043 | ret = tree_insert_offset(&cluster->root, entry->offset, |
2011 | &entry->offset_index, 1); | 2044 | &entry->offset_index, 1); |
2012 | BUG_ON(ret); | 2045 | BUG_ON(ret); |
@@ -2021,6 +2054,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, | |||
2021 | struct btrfs_free_cluster *cluster, | 2054 | struct btrfs_free_cluster *cluster, |
2022 | u64 offset, u64 bytes, u64 min_bytes) | 2055 | u64 offset, u64 bytes, u64 min_bytes) |
2023 | { | 2056 | { |
2057 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
2024 | struct btrfs_free_space *first = NULL; | 2058 | struct btrfs_free_space *first = NULL; |
2025 | struct btrfs_free_space *entry = NULL; | 2059 | struct btrfs_free_space *entry = NULL; |
2026 | struct btrfs_free_space *prev = NULL; | 2060 | struct btrfs_free_space *prev = NULL; |
@@ -2031,7 +2065,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, | |||
2031 | u64 max_extent; | 2065 | u64 max_extent; |
2032 | u64 max_gap = 128 * 1024; | 2066 | u64 max_gap = 128 * 1024; |
2033 | 2067 | ||
2034 | entry = tree_search_offset(block_group, offset, 0, 1); | 2068 | entry = tree_search_offset(ctl, offset, 0, 1); |
2035 | if (!entry) | 2069 | if (!entry) |
2036 | return -ENOSPC; | 2070 | return -ENOSPC; |
2037 | 2071 | ||
@@ -2097,7 +2131,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, | |||
2097 | if (entry->bitmap) | 2131 | if (entry->bitmap) |
2098 | continue; | 2132 | continue; |
2099 | 2133 | ||
2100 | rb_erase(&entry->offset_index, &block_group->free_space_offset); | 2134 | rb_erase(&entry->offset_index, &ctl->free_space_offset); |
2101 | ret = tree_insert_offset(&cluster->root, entry->offset, | 2135 | ret = tree_insert_offset(&cluster->root, entry->offset, |
2102 | &entry->offset_index, 0); | 2136 | &entry->offset_index, 0); |
2103 | BUG_ON(ret); | 2137 | BUG_ON(ret); |
@@ -2116,16 +2150,15 @@ static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, | |||
2116 | struct btrfs_free_cluster *cluster, | 2150 | struct btrfs_free_cluster *cluster, |
2117 | u64 offset, u64 bytes, u64 min_bytes) | 2151 | u64 offset, u64 bytes, u64 min_bytes) |
2118 | { | 2152 | { |
2153 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
2119 | struct btrfs_free_space *entry; | 2154 | struct btrfs_free_space *entry; |
2120 | struct rb_node *node; | 2155 | struct rb_node *node; |
2121 | int ret = -ENOSPC; | 2156 | int ret = -ENOSPC; |
2122 | 2157 | ||
2123 | if (block_group->total_bitmaps == 0) | 2158 | if (ctl->total_bitmaps == 0) |
2124 | return -ENOSPC; | 2159 | return -ENOSPC; |
2125 | 2160 | ||
2126 | entry = tree_search_offset(block_group, | 2161 | entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1); |
2127 | offset_to_bitmap(block_group, offset), | ||
2128 | 0, 1); | ||
2129 | if (!entry) | 2162 | if (!entry) |
2130 | return -ENOSPC; | 2163 | return -ENOSPC; |
2131 | 2164 | ||
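
offset_to_bitmap() now takes the ctl, so the window arithmetic can be stated in terms of ctl->start and ctl->unit instead of the block group's start and sectorsize. Its body falls outside these hunks; roughly, it rounds an offset down to the start of the bitmap window covering it (a sketch consistent with the new fields, not the verbatim patch body):

static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
                                   u64 offset)
{
        u64 bytes_per_bitmap = (u64)BITS_PER_BITMAP * ctl->unit;
        u64 rel = offset - ctl->start;  /* offset relative to the space */

        /* round down to the covering bitmap window */
        return ctl->start + rel - (rel % bytes_per_bitmap);
}
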
@@ -2158,6 +2191,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, | |||
2158 | struct btrfs_free_cluster *cluster, | 2191 | struct btrfs_free_cluster *cluster, |
2159 | u64 offset, u64 bytes, u64 empty_size) | 2192 | u64 offset, u64 bytes, u64 empty_size) |
2160 | { | 2193 | { |
2194 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
2161 | u64 min_bytes; | 2195 | u64 min_bytes; |
2162 | int ret; | 2196 | int ret; |
2163 | 2197 | ||
@@ -2177,14 +2211,14 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, | |||
2177 | } else | 2211 | } else |
2178 | min_bytes = max(bytes, (bytes + empty_size) >> 2); | 2212 | min_bytes = max(bytes, (bytes + empty_size) >> 2); |
2179 | 2213 | ||
2180 | spin_lock(&block_group->tree_lock); | 2214 | spin_lock(&ctl->tree_lock); |
2181 | 2215 | ||
2182 | /* | 2216 | /* |
2183 | * If we know we don't have enough space to make a cluster don't even | 2217 | * If we know we don't have enough space to make a cluster don't even |
2184 | * bother doing all the work to try and find one. | 2218 | * bother doing all the work to try and find one. |
2185 | */ | 2219 | */ |
2186 | if (block_group->free_space < min_bytes) { | 2220 | if (ctl->free_space < min_bytes) { |
2187 | spin_unlock(&block_group->tree_lock); | 2221 | spin_unlock(&ctl->tree_lock); |
2188 | return -ENOSPC; | 2222 | return -ENOSPC; |
2189 | } | 2223 | } |
2190 | 2224 | ||
@@ -2210,7 +2244,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, | |||
2210 | } | 2244 | } |
2211 | out: | 2245 | out: |
2212 | spin_unlock(&cluster->lock); | 2246 | spin_unlock(&cluster->lock); |
2213 | spin_unlock(&block_group->tree_lock); | 2247 | spin_unlock(&ctl->tree_lock); |
2214 | 2248 | ||
2215 | return ret; | 2249 | return ret; |
2216 | } | 2250 | } |
@@ -2231,6 +2265,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) | |||
2231 | int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, | 2265 | int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, |
2232 | u64 *trimmed, u64 start, u64 end, u64 minlen) | 2266 | u64 *trimmed, u64 start, u64 end, u64 minlen) |
2233 | { | 2267 | { |
2268 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
2234 | struct btrfs_free_space *entry = NULL; | 2269 | struct btrfs_free_space *entry = NULL; |
2235 | struct btrfs_fs_info *fs_info = block_group->fs_info; | 2270 | struct btrfs_fs_info *fs_info = block_group->fs_info; |
2236 | u64 bytes = 0; | 2271 | u64 bytes = 0; |
@@ -2240,52 +2275,50 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, | |||
2240 | *trimmed = 0; | 2275 | *trimmed = 0; |
2241 | 2276 | ||
2242 | while (start < end) { | 2277 | while (start < end) { |
2243 | spin_lock(&block_group->tree_lock); | 2278 | spin_lock(&ctl->tree_lock); |
2244 | 2279 | ||
2245 | if (block_group->free_space < minlen) { | 2280 | if (ctl->free_space < minlen) { |
2246 | spin_unlock(&block_group->tree_lock); | 2281 | spin_unlock(&ctl->tree_lock); |
2247 | break; | 2282 | break; |
2248 | } | 2283 | } |
2249 | 2284 | ||
2250 | entry = tree_search_offset(block_group, start, 0, 1); | 2285 | entry = tree_search_offset(ctl, start, 0, 1); |
2251 | if (!entry) | 2286 | if (!entry) |
2252 | entry = tree_search_offset(block_group, | 2287 | entry = tree_search_offset(ctl, |
2253 | offset_to_bitmap(block_group, | 2288 | offset_to_bitmap(ctl, start), |
2254 | start), | ||
2255 | 1, 1); | 2289 | 1, 1); |
2256 | 2290 | ||
2257 | if (!entry || entry->offset >= end) { | 2291 | if (!entry || entry->offset >= end) { |
2258 | spin_unlock(&block_group->tree_lock); | 2292 | spin_unlock(&ctl->tree_lock); |
2259 | break; | 2293 | break; |
2260 | } | 2294 | } |
2261 | 2295 | ||
2262 | if (entry->bitmap) { | 2296 | if (entry->bitmap) { |
2263 | ret = search_bitmap(block_group, entry, &start, &bytes); | 2297 | ret = search_bitmap(ctl, entry, &start, &bytes); |
2264 | if (!ret) { | 2298 | if (!ret) { |
2265 | if (start >= end) { | 2299 | if (start >= end) { |
2266 | spin_unlock(&block_group->tree_lock); | 2300 | spin_unlock(&ctl->tree_lock); |
2267 | break; | 2301 | break; |
2268 | } | 2302 | } |
2269 | bytes = min(bytes, end - start); | 2303 | bytes = min(bytes, end - start); |
2270 | bitmap_clear_bits(block_group, entry, | 2304 | bitmap_clear_bits(ctl, entry, start, bytes); |
2271 | start, bytes); | ||
2272 | if (entry->bytes == 0) | 2305 | if (entry->bytes == 0) |
2273 | free_bitmap(block_group, entry); | 2306 | free_bitmap(ctl, entry); |
2274 | } else { | 2307 | } else { |
2275 | start = entry->offset + BITS_PER_BITMAP * | 2308 | start = entry->offset + BITS_PER_BITMAP * |
2276 | block_group->sectorsize; | 2309 | block_group->sectorsize; |
2277 | spin_unlock(&block_group->tree_lock); | 2310 | spin_unlock(&ctl->tree_lock); |
2278 | ret = 0; | 2311 | ret = 0; |
2279 | continue; | 2312 | continue; |
2280 | } | 2313 | } |
2281 | } else { | 2314 | } else { |
2282 | start = entry->offset; | 2315 | start = entry->offset; |
2283 | bytes = min(entry->bytes, end - start); | 2316 | bytes = min(entry->bytes, end - start); |
2284 | unlink_free_space(block_group, entry); | 2317 | unlink_free_space(ctl, entry); |
2285 | kfree(entry); | 2318 | kfree(entry); |
2286 | } | 2319 | } |
2287 | 2320 | ||
2288 | spin_unlock(&block_group->tree_lock); | 2321 | spin_unlock(&ctl->tree_lock); |
2289 | 2322 | ||
2290 | if (bytes >= minlen) { | 2323 | if (bytes >= minlen) { |
2291 | int update_ret; | 2324 | int update_ret; |
@@ -2297,8 +2330,7 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, | |||
2297 | bytes, | 2330 | bytes, |
2298 | &actually_trimmed); | 2331 | &actually_trimmed); |
2299 | 2332 | ||
2300 | btrfs_add_free_space(block_group, | 2333 | btrfs_add_free_space(block_group, start, bytes); |
2301 | start, bytes); | ||
2302 | if (!update_ret) | 2334 | if (!update_ret) |
2303 | btrfs_update_reserved_bytes(block_group, | 2335 | btrfs_update_reserved_bytes(block_group, |
2304 | bytes, 0, 1); | 2336 | bytes, 0, 1); |
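
One detail worth flagging in the trim hunk: when a bitmap yields no usable run, start is advanced by entry->offset + BITS_PER_BITMAP * block_group->sectorsize, i.e. this line still reaches for the block group's sectorsize, which for block groups equals the new ctl->unit. Expressed generically (hypothetical helper, assuming entry->offset is the window base):

/* Advance past the bitmap window that begins at `window_start`. */
static u64 next_bitmap_start(struct btrfs_free_space_ctl *ctl,
                             u64 window_start)
{
        return window_start + (u64)BITS_PER_BITMAP * ctl->unit;
}
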
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 12b2b5165f8a..a64a23fae1eb 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h | |||
@@ -27,6 +27,25 @@ struct btrfs_free_space { | |||
27 | struct list_head list; | 27 | struct list_head list; |
28 | }; | 28 | }; |
29 | 29 | ||
30 | struct btrfs_free_space_ctl { | ||
31 | spinlock_t tree_lock; | ||
32 | struct rb_root free_space_offset; | ||
33 | u64 free_space; | ||
34 | int extents_thresh; | ||
35 | int free_extents; | ||
36 | int total_bitmaps; | ||
37 | int unit; | ||
38 | u64 start; | ||
39 | struct btrfs_free_space_op *op; | ||
40 | void *private; | ||
41 | }; | ||
42 | |||
43 | struct btrfs_free_space_op { | ||
44 | void (*recalc_thresholds)(struct btrfs_free_space_ctl *ctl); | ||
45 | bool (*use_bitmap)(struct btrfs_free_space_ctl *ctl, | ||
46 | struct btrfs_free_space *info); | ||
47 | }; | ||
48 | |||
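
These two structs are the whole point of the patch: everything the cache operates on is now carried by btrfs_free_space_ctl, and per-user policy lives behind btrfs_free_space_op. Per the commit message, the intended second user is the free-inode-number cache; a hypothetical sketch of how such a user would register its own policy (all names below are illustrative assumptions, not part of this patch):

/* Hypothetical second user: caching free inode numbers. */
static void inode_cache_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
{
        /* inode numbers are all unit-sized; a fixed threshold suffices */
        ctl->extents_thresh = 0;
}

static bool inode_cache_use_bitmap(struct btrfs_free_space_ctl *ctl,
                                   struct btrfs_free_space *info)
{
        return true;    /* always prefer the compact bitmap form */
}

static struct btrfs_free_space_op inode_cache_ops = {
        .recalc_thresholds      = inode_cache_recalc_thresholds,
        .use_bitmap             = inode_cache_use_bitmap,
};
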
30 | struct inode *lookup_free_space_inode(struct btrfs_root *root, | 49 | struct inode *lookup_free_space_inode(struct btrfs_root *root, |
31 | struct btrfs_block_group_cache | 50 | struct btrfs_block_group_cache |
32 | *block_group, struct btrfs_path *path); | 51 | *block_group, struct btrfs_path *path); |
@@ -45,6 +64,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, | |||
45 | struct btrfs_trans_handle *trans, | 64 | struct btrfs_trans_handle *trans, |
46 | struct btrfs_block_group_cache *block_group, | 65 | struct btrfs_block_group_cache *block_group, |
47 | struct btrfs_path *path); | 66 | struct btrfs_path *path); |
67 | void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group); | ||
48 | int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | 68 | int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, |
49 | u64 bytenr, u64 size); | 69 | u64 bytenr, u64 size); |
50 | int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, | 70 | int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, |
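
btrfs_init_free_space_ctl() is declared here but its body falls outside these hunks. Given the fields of struct btrfs_free_space_ctl, a plausible reconstruction looks like the following (an assumption, not the verbatim patch; it shows how the generic fields are bound to block-group-specific values):

void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

        spin_lock_init(&ctl->tree_lock);
        ctl->unit = block_group->sectorsize;    /* byte granularity */
        ctl->start = block_group->key.objectid; /* space begins here */
        ctl->private = block_group;             /* back-pointer for ops */
        ctl->op = &free_space_op;               /* block-group policy */
}
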