author		Chris Mason <chris.mason@oracle.com>	2010-03-11 09:42:04 -0500
committer	Chris Mason <chris.mason@oracle.com>	2010-03-15 11:00:10 -0400
commit		1e701a3292e25a6c4939cad9f24951dc6b6ad853 (patch)
tree		899887a99aae82fe113bffedccb90a76e5473f8b /fs
parent		940100a4a7b78b27e60a3e72340fb9b5397dcdb2 (diff)
Btrfs: add new defrag-range ioctl.
The btrfs defrag ioctl was limited to doing the entire file.  This commit
adds a new interface that can defrag a specific range inside the file.

It can also force compression on the file, allowing you to selectively
compress individual files after they were created, even when mount -o
compress isn't turned on.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
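As an illustration of the new interface (not part of this commit), a minimal userspace sketch that asks for a compressed defrag of the first 16MiB of a file might look like the following. The structure, flag, and ioctl names are exactly those added to fs/btrfs/ioctl.h in this patch; the header include is an assumption, since current kernels export these definitions through <linux/btrfs.h> while applications contemporary with this patch typically carried a copy of fs/btrfs/ioctl.h. The 16MiB length is arbitrary.

/*
 * Illustrative sketch only: compress-defrag the first 16MiB of a file
 * with the new BTRFS_IOC_DEFRAG_RANGE ioctl.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>	/* assumption: defrag_range_args exported here on current kernels */

int main(int argc, char **argv)
{
	struct btrfs_ioctl_defrag_range_args range;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&range, 0, sizeof(range));
	range.start = 0;				/* defrag from the start of the file */
	range.len = 16 * 1024 * 1024;			/* only the first 16MiB; (u64)-1 means the whole file */
	range.flags = BTRFS_DEFRAG_RANGE_COMPRESS;	/* rewrite the range compressed */
	range.extent_thresh = 0;			/* 0 = kernel default threshold (256k) */

	if (ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &range) < 0) {
		perror("BTRFS_IOC_DEFRAG_RANGE");
		return 1;
	}
	return 0;
}

Note that when BTRFS_DEFRAG_RANGE_COMPRESS is set, the kernel side of this patch forces BTRFS_DEFRAG_RANGE_START_IO and an extent_thresh of (u32)-1, so every extent in the range is rewritten and the IO is flushed before the ioctl returns.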
Diffstat (limited to 'fs')
-rw-r--r--   fs/btrfs/btrfs_inode.h  |  5
-rw-r--r--   fs/btrfs/ctree.h        |  1
-rw-r--r--   fs/btrfs/inode.c        | 11
-rw-r--r--   fs/btrfs/ioctl.c        | 83
-rw-r--r--   fs/btrfs/ioctl.h        | 31
5 files changed, 117 insertions, 14 deletions
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 3f1f50d9d916..7a4dee199832 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -153,6 +153,11 @@ struct btrfs_inode {
 	unsigned ordered_data_close:1;
 	unsigned dummy_inode:1;
 
+	/*
+	 * always compress this one file
+	 */
+	unsigned force_compress:1;
+
 	struct inode vfs_inode;
 };
 
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 1166b15e9bf6..3a36b1fb553a 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1184,7 +1184,6 @@ struct btrfs_root {
 #define BTRFS_INODE_NOATIME		(1 << 9)
 #define BTRFS_INODE_DIRSYNC		(1 << 10)
 
-
 /* some macros to generate set/get funcs for the struct fields.  This
  * assumes there is a lefoo_to_cpu for every type, so lets make a simple
  * one for u8:
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 7d10d1ccb0fe..3657925c2461 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -379,7 +379,8 @@ again:
 	 * change at any time if we discover bad compression ratios.
 	 */
 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
-	    btrfs_test_opt(root, COMPRESS)) {
+	    (btrfs_test_opt(root, COMPRESS) ||
+	     (BTRFS_I(inode)->force_compress))) {
 		WARN_ON(pages);
 		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
 
@@ -483,8 +484,10 @@ again:
 		nr_pages_ret = 0;
 
 		/* flag the file so we don't compress in the future */
-		if (!btrfs_test_opt(root, FORCE_COMPRESS))
+		if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
+		    !(BTRFS_I(inode)->force_compress)) {
 			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
+		}
 	}
 	if (will_compress) {
 		*num_added += 1;
@@ -1211,7 +1214,8 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
 	else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
 		ret = run_delalloc_nocow(inode, locked_page, start, end,
 					 page_started, 0, nr_written);
-	else if (!btrfs_test_opt(root, COMPRESS))
+	else if (!btrfs_test_opt(root, COMPRESS) &&
+		 !(BTRFS_I(inode)->force_compress))
 		ret = cow_file_range(inode, locked_page, start, end,
 				     page_started, nr_written, 1);
 	else
@@ -3639,6 +3643,7 @@ static noinline void init_btrfs_i(struct inode *inode)
 	bi->index_cnt = (u64)-1;
 	bi->last_unlink_trans = 0;
 	bi->ordered_data_close = 0;
+	bi->force_compress = 0;
 	extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
 	extent_io_tree_init(&BTRFS_I(inode)->io_tree,
 			    inode->i_mapping, GFP_NOFS);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 3a89cd77f307..d866b460c26e 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -476,13 +476,18 @@ out_unlock:
 }
 
 static int should_defrag_range(struct inode *inode, u64 start, u64 len,
-			       u64 *last_len, u64 *skip, u64 *defrag_end)
+			       int thresh, u64 *last_len, u64 *skip,
+			       u64 *defrag_end)
 {
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	struct extent_map *em = NULL;
 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
 	int ret = 1;
 
+
+	if (thresh == 0)
+		thresh = 256 * 1024;
+
 	/*
 	 * make sure that once we start defragging and extent, we keep on
 	 * defragging it
@@ -517,8 +522,7 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
 	/*
 	 * we hit a real extent, if it is big don't bother defragging it again
 	 */
-	if ((*last_len == 0 || *last_len >= 256 * 1024) &&
-	    em->len >= 256 * 1024)
+	if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh)
 		ret = 0;
 
 	/*
@@ -542,7 +546,8 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
 	return ret;
 }
 
-static int btrfs_defrag_file(struct file *file)
+static int btrfs_defrag_file(struct file *file,
+			     struct btrfs_ioctl_defrag_range_args *range)
 {
 	struct inode *inode = fdentry(file)->d_inode;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -563,11 +568,19 @@ static int btrfs_defrag_file(struct file *file)
 	if (inode->i_size == 0)
 		return 0;
 
-	last_index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT;
-	i = 0;
+	if (range->start + range->len > range->start) {
+		last_index = min_t(u64, inode->i_size - 1,
+			 range->start + range->len - 1) >> PAGE_CACHE_SHIFT;
+	} else {
+		last_index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT;
+	}
+
+	i = range->start >> PAGE_CACHE_SHIFT;
 	while (i <= last_index) {
 		if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
-					 PAGE_CACHE_SIZE, &last_len, &skip,
+					 PAGE_CACHE_SIZE,
+					 range->extent_thresh,
+					 &last_len, &skip,
 					 &defrag_end)) {
 			unsigned long next;
 			/*
@@ -585,6 +598,8 @@ static int btrfs_defrag_file(struct file *file)
 		}
 		total_read++;
 		mutex_lock(&inode->i_mutex);
+		if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)
+			BTRFS_I(inode)->force_compress = 1;
 
 		ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
 		if (ret) {
@@ -673,6 +688,28 @@ loop_unlock:
 		i++;
 	}
 
+	if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO))
+		filemap_flush(inode->i_mapping);
+
+	if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
+		/* the filemap_flush will queue IO into the worker threads, but
+		 * we have to make sure the IO is actually started and that
+		 * ordered extents get created before we return
+		 */
+		atomic_inc(&root->fs_info->async_submit_draining);
+		while (atomic_read(&root->fs_info->nr_async_submits) ||
+		       atomic_read(&root->fs_info->async_delalloc_pages)) {
+			wait_event(root->fs_info->async_submit_wait,
+			    (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
+			     atomic_read(&root->fs_info->async_delalloc_pages) == 0));
+		}
+		atomic_dec(&root->fs_info->async_submit_draining);
+
+		mutex_lock(&inode->i_mutex);
+		BTRFS_I(inode)->force_compress = 0;
+		mutex_unlock(&inode->i_mutex);
+	}
+
 	return 0;
 
 err_reservations:
@@ -1284,10 +1321,11 @@ out:
 	return err;
 }
 
-static int btrfs_ioctl_defrag(struct file *file)
+static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
 {
 	struct inode *inode = fdentry(file)->d_inode;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_ioctl_defrag_range_args *range;
 	int ret;
 
 	ret = mnt_want_write(file->f_path.mnt);
@@ -1308,7 +1346,30 @@ static int btrfs_ioctl_defrag(struct file *file)
 			ret = -EINVAL;
 			goto out;
 		}
-		btrfs_defrag_file(file);
+
+		range = kzalloc(sizeof(*range), GFP_KERNEL);
+		if (!range) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		if (argp) {
+			if (copy_from_user(range, argp,
+					   sizeof(*range))) {
+				ret = -EFAULT;
+				kfree(range);
+			}
+			/* compression requires us to start the IO */
+			if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
+				range->flags |= BTRFS_DEFRAG_RANGE_START_IO;
+				range->extent_thresh = (u32)-1;
+			}
+		} else {
+			/* the rest are all set to zero by kzalloc */
+			range->len = (u64)-1;
+		}
+		btrfs_defrag_file(file, range);
+		kfree(range);
 		break;
 	}
 out:
@@ -1831,7 +1892,9 @@ long btrfs_ioctl(struct file *file, unsigned int
 	case BTRFS_IOC_DEFAULT_SUBVOL:
 		return btrfs_ioctl_default_subvol(file, argp);
 	case BTRFS_IOC_DEFRAG:
-		return btrfs_ioctl_defrag(file);
+		return btrfs_ioctl_defrag(file, NULL);
+	case BTRFS_IOC_DEFRAG_RANGE:
+		return btrfs_ioctl_defrag(file, argp);
 	case BTRFS_IOC_RESIZE:
 		return btrfs_ioctl_resize(root, argp);
 	case BTRFS_IOC_ADD_DEV:
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h
index f1923e0260e3..2d64a65842f3 100644
--- a/fs/btrfs/ioctl.h
+++ b/fs/btrfs/ioctl.h
@@ -99,6 +99,35 @@ struct btrfs_ioctl_clone_range_args {
 	__u64 dest_offset;
 };
 
+/* flags for the defrag range ioctl */
+#define BTRFS_DEFRAG_RANGE_COMPRESS 1
+#define BTRFS_DEFRAG_RANGE_START_IO 2
+
+struct btrfs_ioctl_defrag_range_args {
+	/* start of the defrag operation */
+	__u64 start;
+
+	/* number of bytes to defrag, use (u64)-1 to say all */
+	__u64 len;
+
+	/*
+	 * flags for the operation, which can include turning
+	 * on compression for this one defrag
+	 */
+	__u64 flags;
+
+	/*
+	 * any extent bigger than this will be considered
+	 * already defragged.  Use 0 to take the kernel default
+	 * Use 1 to say every single extent must be rewritten
+	 */
+	__u32 extent_thresh;
+
+	/* spare for later */
+	__u32 unused[5];
+};
+
+
 #define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \
 				   struct btrfs_ioctl_vol_args)
 #define BTRFS_IOC_DEFRAG _IOW(BTRFS_IOCTL_MAGIC, 2, \
@@ -130,6 +159,8 @@ struct btrfs_ioctl_clone_range_args {
 				   struct btrfs_ioctl_vol_args)
 #define BTRFS_IOC_SNAP_DESTROY _IOW(BTRFS_IOCTL_MAGIC, 15, \
 				struct btrfs_ioctl_vol_args)
+#define BTRFS_IOC_DEFRAG_RANGE _IOW(BTRFS_IOCTL_MAGIC, 16, \
+				struct btrfs_ioctl_defrag_range_args)
 #define BTRFS_IOC_TREE_SEARCH _IOWR(BTRFS_IOCTL_MAGIC, 17, \
 			   struct btrfs_ioctl_search_args)
 #define BTRFS_IOC_INO_LOOKUP _IOWR(BTRFS_IOCTL_MAGIC, 18, \