author		Li Zefan <lizf@cn.fujitsu.com>		2011-09-02 03:57:07 -0400
committer	David Sterba <dsterba@suse.cz>		2011-10-20 12:10:39 -0400
commit		008873eafbc77deb1702aedece33756c58486c6a
tree		2b578bfdb47f8351f33d8ebf29714d4f2e4b15f6 /fs
parent		83c8c9bde0add721f7509aa446455183b040b931
Btrfs: honor extent thresh during defragmentation
We won't defrag an extent if it's bigger than the threshold we specified
and there's no small extent before it, but actually the code doesn't work
this way. There are three bugs:

- When should_defrag_range() decides we should keep on defragging an
  extent, last_len is not incremented. (old bug)

- The length that is passed to should_defrag_range() is not the length
  we're going to defrag. (new bug)

- We always defrag 256K bytes of data, and a big extent can be part of
  this range. (new bug)

For a file with 4 extents:

	| 4K | 4K | 256K | 256K |

the result of defragging with the (default) 256K extent thresh should be:

	| 264K | 256K |

but with those bugs, we'll get:

	| 520K |

Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
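To make the fixed arithmetic concrete, here is a minimal user-space sketch of the cluster clamping the patch introduces in btrfs_defrag_file(). It assumes 4K pages; PAGE_SHIFT, PAGE_ALIGN and the 256K cluster constant stand in for their kernel counterparts, and defrag_end models the example layout above (4K + 4K + 256K = 264K):

/*
 * Illustrative sketch, not kernel code: clamp each defrag cluster so
 * it never reaches past defrag_end, the point where
 * should_defrag_range() stopped finding extents worth defragging.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long max_cluster = (256 * 1024) >> PAGE_SHIFT; /* 64 pages */
	unsigned long defrag_end = 264 * 1024;	/* 4K + 4K + 256K */
	unsigned long last_index = PAGE_ALIGN(defrag_end) >> PAGE_SHIFT;
	unsigned long i = 0, cluster;

	while (i < last_index) {
		/* never let a cluster extend past defrag_end */
		cluster = last_index - i;
		if (cluster > max_cluster)
			cluster = max_cluster;
		printf("defrag %lu pages starting at page %lu\n", cluster, i);
		i += cluster;
	}
	/* prints 64 pages, then 2: 264K total, leaving the last 256K alone */
	return 0;
}

Before the fix, every cluster was 64 pages regardless of defrag_end, so the head of the trailing 256K extent was pulled into the defrag range and the whole file ended up as one 520K extent.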
Diffstat (limited to 'fs')
-rw-r--r--	fs/btrfs/ioctl.c	37
1 file changed, 26 insertions(+), 11 deletions(-)
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index f9026413bcf1..d524b6697ad9 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -765,7 +765,7 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
 	int ret = 1;
 
 	/*
-	 * make sure that once we start defragging and extent, we keep on
+	 * make sure that once we start defragging an extent, we keep on
	 * defragging it
	 */
	if (start < *defrag_end)
@@ -810,7 +810,6 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
	 * extent will force at least part of that big extent to be defragged.
	 */
	if (ret) {
-		*last_len += len;
		*defrag_end = extent_map_end(em);
	} else {
		*last_len = 0;
@@ -984,13 +983,14 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
	u64 skip = 0;
	u64 defrag_end = 0;
	u64 newer_off = range->start;
-	int newer_left = 0;
	unsigned long i;
+	unsigned long ra_index = 0;
	int ret;
	int defrag_count = 0;
	int compress_type = BTRFS_COMPRESS_ZLIB;
	int extent_thresh = range->extent_thresh;
-	int newer_cluster = (256 * 1024) >> PAGE_CACHE_SHIFT;
+	int max_cluster = (256 * 1024) >> PAGE_CACHE_SHIFT;
+	int cluster = max_cluster;
	u64 new_align = ~((u64)128 * 1024 - 1);
	struct page **pages = NULL;

@@ -1020,7 +1020,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
		ra = &file->f_ra;
	}

-	pages = kmalloc(sizeof(struct page *) * newer_cluster,
+	pages = kmalloc(sizeof(struct page *) * max_cluster,
			GFP_NOFS);
	if (!pages) {
		ret = -ENOMEM;
@@ -1045,7 +1045,6 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
			 * the extents in the file evenly spaced
			 */
			i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
-			newer_left = newer_cluster;
		} else
			goto out_ra;
	} else {
@@ -1077,12 +1076,26 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
			i = max(i + 1, next);
			continue;
		}
+
+		if (!newer_than) {
+			cluster = (PAGE_CACHE_ALIGN(defrag_end) >>
+				   PAGE_CACHE_SHIFT) - i;
+			cluster = min(cluster, max_cluster);
+		} else {
+			cluster = max_cluster;
+		}
+
		if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)
			BTRFS_I(inode)->force_compress = compress_type;

-		btrfs_force_ra(inode->i_mapping, ra, file, i, newer_cluster);
+		if (i + cluster > ra_index) {
+			ra_index = max(i, ra_index);
+			btrfs_force_ra(inode->i_mapping, ra, file, ra_index,
+				       cluster);
+			ra_index += max_cluster;
+		}

-		ret = cluster_pages_for_defrag(inode, pages, i, newer_cluster);
+		ret = cluster_pages_for_defrag(inode, pages, i, cluster);
		if (ret < 0)
			goto out_ra;

@@ -1102,15 +1115,17 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
			if (!ret) {
				range->start = newer_off;
				i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
-				newer_left = newer_cluster;
			} else {
				break;
			}
		} else {
-			if (ret > 0)
+			if (ret > 0) {
				i += ret;
-			else
+				last_len += ret << PAGE_CACHE_SHIFT;
+			} else {
				i++;
+				last_len = 0;
+			}
		}
	}

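A note on the last hunk: the last_len accounting moves out of should_defrag_range() and into the defrag loop, so it now counts what was actually defragged. A small user-space sketch of that bookkeeping (the results[] values are made up to match the example above, standing in for cluster_pages_for_defrag() return values):

/*
 * Illustrative sketch, not kernel code: last_len accumulates the
 * length of the contiguous run just defragged and resets when a
 * cluster is skipped.
 */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long i = 0;
	unsigned long long last_len = 0;
	int results[] = { 64, 2, 0 };	/* pages defragged per iteration */

	for (int n = 0; n < 3; n++) {
		int ret = results[n];

		if (ret > 0) {
			i += ret;	/* advance past what was defragged */
			last_len += (unsigned long long)ret << PAGE_SHIFT;
		} else {
			i++;		/* nothing defragged here */
			last_len = 0;	/* the contiguous run is broken */
		}
		printf("i=%lu last_len=%lluK\n", i, last_len >> 10);
	}
	return 0;
}

The point, per the commit message, is that a big extent is only pulled into a defrag range when small, recently defragged data precedes it; last_len has to track that run accurately, growing across defragged clusters and resetting as soon as one is skipped.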