aboutsummaryrefslogtreecommitdiffstats
path: root/fs/btrfs/ioctl.c
diff options
context:
space:
mode:
authorChris Mason <chris.mason@oracle.com>2010-03-11 09:42:04 -0500
committerChris Mason <chris.mason@oracle.com>2010-03-15 11:00:10 -0400
commit1e701a3292e25a6c4939cad9f24951dc6b6ad853 (patch)
tree899887a99aae82fe113bffedccb90a76e5473f8b /fs/btrfs/ioctl.c
parent940100a4a7b78b27e60a3e72340fb9b5397dcdb2 (diff)
Btrfs: add new defrag-range ioctl.
The btrfs defrag ioctl was limited to doing the entire file. This commit adds a new interface that can defrag a specific range inside the file. It can also force compression on the file, allowing you to selectively compress individual files after they were created, even when mount -o compress isn't turned on. Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/ioctl.c')
-rw-r--r--fs/btrfs/ioctl.c83
1 file changed, 73 insertions, 10 deletions
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 3a89cd77f307..d866b460c26e 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -476,13 +476,18 @@ out_unlock:
476} 476}
477 477
478static int should_defrag_range(struct inode *inode, u64 start, u64 len, 478static int should_defrag_range(struct inode *inode, u64 start, u64 len,
479 u64 *last_len, u64 *skip, u64 *defrag_end) 479 int thresh, u64 *last_len, u64 *skip,
480 u64 *defrag_end)
480{ 481{
481 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 482 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
482 struct extent_map *em = NULL; 483 struct extent_map *em = NULL;
483 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 484 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
484 int ret = 1; 485 int ret = 1;
485 486
487
488 if (thresh == 0)
489 thresh = 256 * 1024;
490
486 /* 491 /*
487 * make sure that once we start defragging and extent, we keep on 492 * make sure that once we start defragging and extent, we keep on
488 * defragging it 493 * defragging it
@@ -517,8 +522,7 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
517 /* 522 /*
518 * we hit a real extent, if it is big don't bother defragging it again 523 * we hit a real extent, if it is big don't bother defragging it again
519 */ 524 */
520 if ((*last_len == 0 || *last_len >= 256 * 1024) && 525 if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh)
521 em->len >= 256 * 1024)
522 ret = 0; 526 ret = 0;
523 527
524 /* 528 /*
@@ -542,7 +546,8 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
542 return ret; 546 return ret;
543} 547}
544 548
545static int btrfs_defrag_file(struct file *file) 549static int btrfs_defrag_file(struct file *file,
550 struct btrfs_ioctl_defrag_range_args *range)
546{ 551{
547 struct inode *inode = fdentry(file)->d_inode; 552 struct inode *inode = fdentry(file)->d_inode;
548 struct btrfs_root *root = BTRFS_I(inode)->root; 553 struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -563,11 +568,19 @@ static int btrfs_defrag_file(struct file *file)
563 if (inode->i_size == 0) 568 if (inode->i_size == 0)
564 return 0; 569 return 0;
565 570
566 last_index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT; 571 if (range->start + range->len > range->start) {
567 i = 0; 572 last_index = min_t(u64, inode->i_size - 1,
573 range->start + range->len - 1) >> PAGE_CACHE_SHIFT;
574 } else {
575 last_index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT;
576 }
577
578 i = range->start >> PAGE_CACHE_SHIFT;
568 while (i <= last_index) { 579 while (i <= last_index) {
569 if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, 580 if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
570 PAGE_CACHE_SIZE, &last_len, &skip, 581 PAGE_CACHE_SIZE,
582 range->extent_thresh,
583 &last_len, &skip,
571 &defrag_end)) { 584 &defrag_end)) {
572 unsigned long next; 585 unsigned long next;
573 /* 586 /*
@@ -585,6 +598,8 @@ static int btrfs_defrag_file(struct file *file)
585 } 598 }
586 total_read++; 599 total_read++;
587 mutex_lock(&inode->i_mutex); 600 mutex_lock(&inode->i_mutex);
601 if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)
602 BTRFS_I(inode)->force_compress = 1;
588 603
589 ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE); 604 ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
590 if (ret) { 605 if (ret) {
@@ -673,6 +688,28 @@ loop_unlock:
673 i++; 688 i++;
674 } 689 }
675 690
691 if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO))
692 filemap_flush(inode->i_mapping);
693
694 if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
695 /* the filemap_flush will queue IO into the worker threads, but
696 * we have to make sure the IO is actually started and that
697 * ordered extents get created before we return
698 */
699 atomic_inc(&root->fs_info->async_submit_draining);
700 while (atomic_read(&root->fs_info->nr_async_submits) ||
701 atomic_read(&root->fs_info->async_delalloc_pages)) {
702 wait_event(root->fs_info->async_submit_wait,
703 (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
704 atomic_read(&root->fs_info->async_delalloc_pages) == 0));
705 }
706 atomic_dec(&root->fs_info->async_submit_draining);
707
708 mutex_lock(&inode->i_mutex);
709 BTRFS_I(inode)->force_compress = 0;
710 mutex_unlock(&inode->i_mutex);
711 }
712
676 return 0; 713 return 0;
677 714
678err_reservations: 715err_reservations:
@@ -1284,10 +1321,11 @@ out:
1284 return err; 1321 return err;
1285} 1322}
1286 1323
1287static int btrfs_ioctl_defrag(struct file *file) 1324static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
1288{ 1325{
1289 struct inode *inode = fdentry(file)->d_inode; 1326 struct inode *inode = fdentry(file)->d_inode;
1290 struct btrfs_root *root = BTRFS_I(inode)->root; 1327 struct btrfs_root *root = BTRFS_I(inode)->root;
1328 struct btrfs_ioctl_defrag_range_args *range;
1291 int ret; 1329 int ret;
1292 1330
1293 ret = mnt_want_write(file->f_path.mnt); 1331 ret = mnt_want_write(file->f_path.mnt);
@@ -1308,7 +1346,30 @@ static int btrfs_ioctl_defrag(struct file *file)
1308 ret = -EINVAL; 1346 ret = -EINVAL;
1309 goto out; 1347 goto out;
1310 } 1348 }
1311 btrfs_defrag_file(file); 1349
1350 range = kzalloc(sizeof(*range), GFP_KERNEL);
1351 if (!range) {
1352 ret = -ENOMEM;
1353 goto out;
1354 }
1355
1356 if (argp) {
1357 if (copy_from_user(range, argp,
1358 sizeof(*range))) {
1359 ret = -EFAULT;
1360 kfree(range);
1361 }
1362 /* compression requires us to start the IO */
1363 if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
1364 range->flags |= BTRFS_DEFRAG_RANGE_START_IO;
1365 range->extent_thresh = (u32)-1;
1366 }
1367 } else {
1368 /* the rest are all set to zero by kzalloc */
1369 range->len = (u64)-1;
1370 }
1371 btrfs_defrag_file(file, range);
1372 kfree(range);
1312 break; 1373 break;
1313 } 1374 }
1314out: 1375out:
@@ -1831,7 +1892,9 @@ long btrfs_ioctl(struct file *file, unsigned int
1831 case BTRFS_IOC_DEFAULT_SUBVOL: 1892 case BTRFS_IOC_DEFAULT_SUBVOL:
1832 return btrfs_ioctl_default_subvol(file, argp); 1893 return btrfs_ioctl_default_subvol(file, argp);
1833 case BTRFS_IOC_DEFRAG: 1894 case BTRFS_IOC_DEFRAG:
1834 return btrfs_ioctl_defrag(file); 1895 return btrfs_ioctl_defrag(file, NULL);
1896 case BTRFS_IOC_DEFRAG_RANGE:
1897 return btrfs_ioctl_defrag(file, argp);
1835 case BTRFS_IOC_RESIZE: 1898 case BTRFS_IOC_RESIZE:
1836 return btrfs_ioctl_resize(root, argp); 1899 return btrfs_ioctl_resize(root, argp);
1837 case BTRFS_IOC_ADD_DEV: 1900 case BTRFS_IOC_ADD_DEV: