Diffstat (limited to 'fs/btrfs/ioctl.c')
-rw-r--r--	fs/btrfs/ioctl.c	706
1 file changed, 690 insertions(+), 16 deletions(-)
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 645a17927a8f..2845c6ceecd2 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -48,6 +48,7 @@
 #include "print-tree.h"
 #include "volumes.h"
 #include "locking.h"
+#include "ctree.h"
 
 /* Mask out flags that are inappropriate for the given type of inode. */
 static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
@@ -474,7 +475,79 @@ out_unlock:
 	return error;
 }
 
-static int btrfs_defrag_file(struct file *file)
+static int should_defrag_range(struct inode *inode, u64 start, u64 len,
+			       int thresh, u64 *last_len, u64 *skip,
+			       u64 *defrag_end)
+{
+	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+	struct extent_map *em = NULL;
+	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+	int ret = 1;
+
+
+	if (thresh == 0)
+		thresh = 256 * 1024;
+
+	/*
+	 * make sure that once we start defragging an extent, we keep on
+	 * defragging it
+	 */
+	if (start < *defrag_end)
+		return 1;
+
+	*skip = 0;
+
+	/*
+	 * hopefully we have this extent in the tree already, try without
+	 * the full extent lock
+	 */
+	read_lock(&em_tree->lock);
+	em = lookup_extent_mapping(em_tree, start, len);
+	read_unlock(&em_tree->lock);
+
+	if (!em) {
+		/* get the big lock and read metadata off disk */
+		lock_extent(io_tree, start, start + len - 1, GFP_NOFS);
+		em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
+		unlock_extent(io_tree, start, start + len - 1, GFP_NOFS);
+
+		if (!em)
+			return 0;
+	}
+
+	/* this will cover holes, and inline extents */
+	if (em->block_start >= EXTENT_MAP_LAST_BYTE)
+		ret = 0;
+
+	/*
+	 * we hit a real extent, if it is big don't bother defragging it again
+	 */
+	if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh)
+		ret = 0;
+
+	/*
+	 * last_len ends up being a counter of how many bytes we've defragged.
+	 * every time we choose not to defrag an extent, we reset *last_len
+	 * so that the next tiny extent will force a defrag.
+	 *
+	 * The end result of this is that tiny extents before a single big
+	 * extent will force at least part of that big extent to be defragged.
+	 */
+	if (ret) {
+		*last_len += len;
+		*defrag_end = extent_map_end(em);
+	} else {
+		*last_len = 0;
+		*skip = extent_map_end(em);
+		*defrag_end = 0;
+	}
+
+	free_extent_map(em);
+	return ret;
+}
+
+static int btrfs_defrag_file(struct file *file,
+			     struct btrfs_ioctl_defrag_range_args *range)
 {
 	struct inode *inode = fdentry(file)->d_inode;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -486,37 +559,96 @@ static int btrfs_defrag_file(struct file *file)
 	unsigned long total_read = 0;
 	u64 page_start;
 	u64 page_end;
+	u64 last_len = 0;
+	u64 skip = 0;
+	u64 defrag_end = 0;
 	unsigned long i;
 	int ret;
 
-	ret = btrfs_check_data_free_space(root, inode, inode->i_size);
-	if (ret)
-		return -ENOSPC;
+	if (inode->i_size == 0)
+		return 0;
+
+	if (range->start + range->len > range->start) {
+		last_index = min_t(u64, inode->i_size - 1,
+			range->start + range->len - 1) >> PAGE_CACHE_SHIFT;
+	} else {
+		last_index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT;
+	}
+
+	i = range->start >> PAGE_CACHE_SHIFT;
+	while (i <= last_index) {
+		if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
+					 PAGE_CACHE_SIZE,
+					 range->extent_thresh,
+					 &last_len, &skip,
+					 &defrag_end)) {
+			unsigned long next;
+			/*
+			 * the should_defrag function tells us how much to skip
+			 * bump our counter by the suggested amount
+			 */
+			next = (skip + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+			i = max(i + 1, next);
+			continue;
+		}
 
-	mutex_lock(&inode->i_mutex);
-	last_index = inode->i_size >> PAGE_CACHE_SHIFT;
-	for (i = 0; i <= last_index; i++) {
 		if (total_read % ra_pages == 0) {
 			btrfs_force_ra(inode->i_mapping, &file->f_ra, file, i,
 				       min(last_index, i + ra_pages - 1));
 		}
 		total_read++;
+		mutex_lock(&inode->i_mutex);
+		if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)
+			BTRFS_I(inode)->force_compress = 1;
+
+		ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
+		if (ret) {
+			ret = -ENOSPC;
+			break;
+		}
+
+		ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
+		if (ret) {
+			btrfs_free_reserved_data_space(root, inode,
+						       PAGE_CACHE_SIZE);
+			ret = -ENOSPC;
+			break;
+		}
 again:
+		if (inode->i_size == 0 ||
+		    i > ((inode->i_size - 1) >> PAGE_CACHE_SHIFT)) {
+			ret = 0;
+			goto err_reservations;
+		}
+
 		page = grab_cache_page(inode->i_mapping, i);
 		if (!page)
-			goto out_unlock;
+			goto err_reservations;
+
 		if (!PageUptodate(page)) {
 			btrfs_readpage(NULL, page);
 			lock_page(page);
 			if (!PageUptodate(page)) {
 				unlock_page(page);
 				page_cache_release(page);
-				goto out_unlock;
+				goto err_reservations;
 			}
 		}
 
+		if (page->mapping != inode->i_mapping) {
+			unlock_page(page);
+			page_cache_release(page);
+			goto again;
+		}
+
 		wait_on_page_writeback(page);
 
+		if (PageDirty(page)) {
+			btrfs_free_reserved_data_space(root, inode,
+						       PAGE_CACHE_SIZE);
+			goto loop_unlock;
+		}
+
 		page_start = (u64)page->index << PAGE_CACHE_SHIFT;
 		page_end = page_start + PAGE_CACHE_SIZE - 1;
 		lock_extent(io_tree, page_start, page_end, GFP_NOFS);
@@ -537,18 +669,54 @@ again:
 		 * page if it is dirtied again later
 		 */
 		clear_page_dirty_for_io(page);
+		clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start,
+				  page_end, EXTENT_DIRTY | EXTENT_DELALLOC |
+				  EXTENT_DO_ACCOUNTING, GFP_NOFS);
 
-		btrfs_set_extent_delalloc(inode, page_start, page_end);
+		btrfs_set_extent_delalloc(inode, page_start, page_end, NULL);
+		ClearPageChecked(page);
 		set_page_dirty(page);
 		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+
+loop_unlock:
 		unlock_page(page);
 		page_cache_release(page);
+		mutex_unlock(&inode->i_mutex);
+
+		btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
 		balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1);
+		i++;
+	}
+
+	if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO))
+		filemap_flush(inode->i_mapping);
+
+	if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
+		/* the filemap_flush will queue IO into the worker threads, but
+		 * we have to make sure the IO is actually started and that
+		 * ordered extents get created before we return
+		 */
+		atomic_inc(&root->fs_info->async_submit_draining);
+		while (atomic_read(&root->fs_info->nr_async_submits) ||
+		       atomic_read(&root->fs_info->async_delalloc_pages)) {
+			wait_event(root->fs_info->async_submit_wait,
+				   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
+				    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
+		}
+		atomic_dec(&root->fs_info->async_submit_draining);
+
+		mutex_lock(&inode->i_mutex);
+		BTRFS_I(inode)->force_compress = 0;
+		mutex_unlock(&inode->i_mutex);
 	}
 
-out_unlock:
-	mutex_unlock(&inode->i_mutex);
 	return 0;
+
+err_reservations:
+	mutex_unlock(&inode->i_mutex);
+	btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
+	btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
+	return ret;
 }
 
 static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
@@ -608,7 +776,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
 		mod = 1;
 		sizestr++;
 	}
-	new_size = btrfs_parse_size(sizestr);
+	new_size = memparse(sizestr, NULL);
 	if (new_size == 0) {
 		ret = -EINVAL;
 		goto out_unlock;
@@ -743,6 +911,327 @@ out:
 	return ret;
 }
 
+static noinline int key_in_sk(struct btrfs_key *key,
+			      struct btrfs_ioctl_search_key *sk)
+{
+	struct btrfs_key test;
+	int ret;
+
+	test.objectid = sk->min_objectid;
+	test.type = sk->min_type;
+	test.offset = sk->min_offset;
+
+	ret = btrfs_comp_cpu_keys(key, &test);
+	if (ret < 0)
+		return 0;
+
+	test.objectid = sk->max_objectid;
+	test.type = sk->max_type;
+	test.offset = sk->max_offset;
+
+	ret = btrfs_comp_cpu_keys(key, &test);
+	if (ret > 0)
+		return 0;
+	return 1;
+}
+
+static noinline int copy_to_sk(struct btrfs_root *root,
+			       struct btrfs_path *path,
+			       struct btrfs_key *key,
+			       struct btrfs_ioctl_search_key *sk,
+			       char *buf,
+			       unsigned long *sk_offset,
+			       int *num_found)
+{
+	u64 found_transid;
+	struct extent_buffer *leaf;
+	struct btrfs_ioctl_search_header sh;
+	unsigned long item_off;
+	unsigned long item_len;
+	int nritems;
+	int i;
+	int slot;
+	int found = 0;
+	int ret = 0;
+
+	leaf = path->nodes[0];
+	slot = path->slots[0];
+	nritems = btrfs_header_nritems(leaf);
+
+	if (btrfs_header_generation(leaf) > sk->max_transid) {
+		i = nritems;
+		goto advance_key;
+	}
+	found_transid = btrfs_header_generation(leaf);
+
+	for (i = slot; i < nritems; i++) {
+		item_off = btrfs_item_ptr_offset(leaf, i);
+		item_len = btrfs_item_size_nr(leaf, i);
+
+		if (item_len > BTRFS_SEARCH_ARGS_BUFSIZE)
+			item_len = 0;
+
+		if (sizeof(sh) + item_len + *sk_offset >
+		    BTRFS_SEARCH_ARGS_BUFSIZE) {
+			ret = 1;
+			goto overflow;
+		}
+
+		btrfs_item_key_to_cpu(leaf, key, i);
+		if (!key_in_sk(key, sk))
+			continue;
+
+		sh.objectid = key->objectid;
+		sh.offset = key->offset;
+		sh.type = key->type;
+		sh.len = item_len;
+		sh.transid = found_transid;
+
+		/* copy search result header */
+		memcpy(buf + *sk_offset, &sh, sizeof(sh));
+		*sk_offset += sizeof(sh);
+
+		if (item_len) {
+			char *p = buf + *sk_offset;
+			/* copy the item */
+			read_extent_buffer(leaf, p,
+					   item_off, item_len);
+			*sk_offset += item_len;
+		}
+		found++;
+
+		if (*num_found >= sk->nr_items)
+			break;
+	}
+advance_key:
+	ret = 0;
+	if (key->offset < (u64)-1 && key->offset < sk->max_offset)
+		key->offset++;
+	else if (key->type < (u8)-1 && key->type < sk->max_type) {
+		key->offset = 0;
+		key->type++;
+	} else if (key->objectid < (u64)-1 && key->objectid < sk->max_objectid) {
+		key->offset = 0;
+		key->type = 0;
+		key->objectid++;
+	} else
+		ret = 1;
+overflow:
+	*num_found += found;
+	return ret;
+}
+
+static noinline int search_ioctl(struct inode *inode,
+				 struct btrfs_ioctl_search_args *args)
+{
+	struct btrfs_root *root;
+	struct btrfs_key key;
+	struct btrfs_key max_key;
+	struct btrfs_path *path;
+	struct btrfs_ioctl_search_key *sk = &args->key;
+	struct btrfs_fs_info *info = BTRFS_I(inode)->root->fs_info;
+	int ret;
+	int num_found = 0;
+	unsigned long sk_offset = 0;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	if (sk->tree_id == 0) {
+		/* search the root of the inode that was passed */
+		root = BTRFS_I(inode)->root;
+	} else {
+		key.objectid = sk->tree_id;
+		key.type = BTRFS_ROOT_ITEM_KEY;
+		key.offset = (u64)-1;
+		root = btrfs_read_fs_root_no_name(info, &key);
+		if (IS_ERR(root)) {
+			printk(KERN_ERR "could not find root %llu\n",
+			       sk->tree_id);
+			btrfs_free_path(path);
+			return -ENOENT;
+		}
+	}
+
+	key.objectid = sk->min_objectid;
+	key.type = sk->min_type;
+	key.offset = sk->min_offset;
+
+	max_key.objectid = sk->max_objectid;
+	max_key.type = sk->max_type;
+	max_key.offset = sk->max_offset;
+
+	path->keep_locks = 1;
+
+	while(1) {
+		ret = btrfs_search_forward(root, &key, &max_key, path, 0,
+					   sk->min_transid);
+		if (ret != 0) {
+			if (ret > 0)
+				ret = 0;
+			goto err;
+		}
+		ret = copy_to_sk(root, path, &key, sk, args->buf,
+				 &sk_offset, &num_found);
+		btrfs_release_path(root, path);
+		if (ret || num_found >= sk->nr_items)
+			break;
+
+	}
+	ret = 0;
+err:
+	sk->nr_items = num_found;
+	btrfs_free_path(path);
+	return ret;
+}
+
+static noinline int btrfs_ioctl_tree_search(struct file *file,
+					    void __user *argp)
+{
+	struct btrfs_ioctl_search_args *args;
+	struct inode *inode;
+	int ret;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	args = kmalloc(sizeof(*args), GFP_KERNEL);
+	if (!args)
+		return -ENOMEM;
+
+	if (copy_from_user(args, argp, sizeof(*args))) {
+		kfree(args);
+		return -EFAULT;
+	}
+	inode = fdentry(file)->d_inode;
+	ret = search_ioctl(inode, args);
+	if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
+		ret = -EFAULT;
+	kfree(args);
+	return ret;
+}
+
+/*
+ * Search INODE_REFs to identify the path name of the 'dirid' directory
+ * in a 'tree_id' tree, and copy that path name into 'name'.
+ */
+static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
+				u64 tree_id, u64 dirid, char *name)
+{
+	struct btrfs_root *root;
+	struct btrfs_key key;
+	char *ptr;
+	int ret = -1;
+	int slot;
+	int len;
+	int total_len = 0;
+	struct btrfs_inode_ref *iref;
+	struct extent_buffer *l;
+	struct btrfs_path *path;
+
+	if (dirid == BTRFS_FIRST_FREE_OBJECTID) {
+		name[0]='\0';
+		return 0;
+	}
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX];
+
+	key.objectid = tree_id;
+	key.type = BTRFS_ROOT_ITEM_KEY;
+	key.offset = (u64)-1;
+	root = btrfs_read_fs_root_no_name(info, &key);
+	if (IS_ERR(root)) {
+		printk(KERN_ERR "could not find root %llu\n", tree_id);
+		ret = -ENOENT;
+		goto out;
+	}
+
+	key.objectid = dirid;
+	key.type = BTRFS_INODE_REF_KEY;
+	key.offset = (u64)-1;
+
+	while(1) {
+		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+		if (ret < 0)
+			goto out;
+
+		l = path->nodes[0];
+		slot = path->slots[0];
+		if (ret > 0 && slot > 0)
+			slot--;
+		btrfs_item_key_to_cpu(l, &key, slot);
+
+		if (ret > 0 && (key.objectid != dirid ||
+				key.type != BTRFS_INODE_REF_KEY)) {
+			ret = -ENOENT;
+			goto out;
+		}
+
+		iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref);
+		len = btrfs_inode_ref_name_len(l, iref);
+		ptr -= len + 1;
+		total_len += len + 1;
+		if (ptr < name)
+			goto out;
+
+		*(ptr + len) = '/';
+		read_extent_buffer(l, ptr,(unsigned long)(iref + 1), len);
+
+		if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
+			break;
+
+		btrfs_release_path(root, path);
+		key.objectid = key.offset;
+		key.offset = (u64)-1;
+		dirid = key.objectid;
+
+	}
+	if (ptr < name)
+		goto out;
+	memcpy(name, ptr, total_len);
+	name[total_len]='\0';
+	ret = 0;
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
+static noinline int btrfs_ioctl_ino_lookup(struct file *file,
+					   void __user *argp)
+{
+	struct btrfs_ioctl_ino_lookup_args *args;
+	struct inode *inode;
+	int ret;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	args = kmalloc(sizeof(*args), GFP_KERNEL);
+	if (copy_from_user(args, argp, sizeof(*args))) {
+		kfree(args);
+		return -EFAULT;
+	}
+	inode = fdentry(file)->d_inode;
+
+	if (args->treeid == 0)
+		args->treeid = BTRFS_I(inode)->root->root_key.objectid;
+
+	ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info,
+					args->treeid, args->objectid,
+					args->name);
+
+	if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
+		ret = -EFAULT;
+
+	kfree(args);
+	return ret;
+}
+
 static noinline int btrfs_ioctl_snap_destroy(struct file *file,
 					     void __user *arg)
 {
@@ -849,10 +1338,11 @@ out:
 	return err;
 }
 
-static int btrfs_ioctl_defrag(struct file *file)
+static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
 {
 	struct inode *inode = fdentry(file)->d_inode;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_ioctl_defrag_range_args *range;
 	int ret;
 
 	ret = mnt_want_write(file->f_path.mnt);
@@ -873,7 +1363,30 @@ static int btrfs_ioctl_defrag(struct file *file)
 			ret = -EINVAL;
 			goto out;
 		}
-		btrfs_defrag_file(file);
+
+		range = kzalloc(sizeof(*range), GFP_KERNEL);
+		if (!range) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		if (argp) {
+			if (copy_from_user(range, argp,
+					   sizeof(*range))) {
+				ret = -EFAULT;
+				kfree(range);
+			}
+			/* compression requires us to start the IO */
+			if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
+				range->flags |= BTRFS_DEFRAG_RANGE_START_IO;
+				range->extent_thresh = (u32)-1;
+			}
+		} else {
+			/* the rest are all set to zero by kzalloc */
+			range->len = (u64)-1;
+		}
+		btrfs_defrag_file(file, range);
+		kfree(range);
 		break;
 	}
 out:
@@ -1274,6 +1787,157 @@ out:
 	return ret;
 }
 
+static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
+{
+	struct inode *inode = fdentry(file)->d_inode;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_root *new_root;
+	struct btrfs_dir_item *di;
+	struct btrfs_trans_handle *trans;
+	struct btrfs_path *path;
+	struct btrfs_key location;
+	struct btrfs_disk_key disk_key;
+	struct btrfs_super_block *disk_super;
+	u64 features;
+	u64 objectid = 0;
+	u64 dir_id;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (copy_from_user(&objectid, argp, sizeof(objectid)))
+		return -EFAULT;
+
+	if (!objectid)
+		objectid = root->root_key.objectid;
+
+	location.objectid = objectid;
+	location.type = BTRFS_ROOT_ITEM_KEY;
+	location.offset = (u64)-1;
+
+	new_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
+	if (IS_ERR(new_root))
+		return PTR_ERR(new_root);
+
+	if (btrfs_root_refs(&new_root->root_item) == 0)
+		return -ENOENT;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+	path->leave_spinning = 1;
+
+	trans = btrfs_start_transaction(root, 1);
+	if (!trans) {
+		btrfs_free_path(path);
+		return -ENOMEM;
+	}
+
+	dir_id = btrfs_super_root_dir(&root->fs_info->super_copy);
+	di = btrfs_lookup_dir_item(trans, root->fs_info->tree_root, path,
+				   dir_id, "default", 7, 1);
+	if (!di) {
+		btrfs_free_path(path);
+		btrfs_end_transaction(trans, root);
+		printk(KERN_ERR "Umm, you don't have the default dir item, "
+		       "this isn't going to work\n");
+		return -ENOENT;
+	}
+
+	btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
+	btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
+	btrfs_mark_buffer_dirty(path->nodes[0]);
+	btrfs_free_path(path);
+
+	disk_super = &root->fs_info->super_copy;
+	features = btrfs_super_incompat_flags(disk_super);
+	if (!(features & BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL)) {
+		features |= BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL;
+		btrfs_set_super_incompat_flags(disk_super, features);
+	}
+	btrfs_end_transaction(trans, root);
+
+	return 0;
+}
+
+long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
+{
+	struct btrfs_ioctl_space_args space_args;
+	struct btrfs_ioctl_space_info space;
+	struct btrfs_ioctl_space_info *dest;
+	struct btrfs_ioctl_space_info *dest_orig;
+	struct btrfs_ioctl_space_info *user_dest;
+	struct btrfs_space_info *info;
+	int alloc_size;
+	int ret = 0;
+	int slot_count = 0;
+
+	if (copy_from_user(&space_args,
+			   (struct btrfs_ioctl_space_args __user *)arg,
+			   sizeof(space_args)))
+		return -EFAULT;
+
+	/* first we count slots */
+	rcu_read_lock();
+	list_for_each_entry_rcu(info, &root->fs_info->space_info, list)
+		slot_count++;
+	rcu_read_unlock();
+
+	/* space_slots == 0 means they are asking for a count */
+	if (space_args.space_slots == 0) {
+		space_args.total_spaces = slot_count;
+		goto out;
+	}
+	alloc_size = sizeof(*dest) * slot_count;
+	/* we generally have at most 6 or so space infos, one for each raid
+	 * level. So, a whole page should be more than enough for everyone
+	 */
+	if (alloc_size > PAGE_CACHE_SIZE)
+		return -ENOMEM;
+
+	space_args.total_spaces = 0;
+	dest = kmalloc(alloc_size, GFP_NOFS);
+	if (!dest)
+		return -ENOMEM;
+	dest_orig = dest;
+
+	/* now we have a buffer to copy into */
+	rcu_read_lock();
+	list_for_each_entry_rcu(info, &root->fs_info->space_info, list) {
+		/* make sure we don't copy more than we allocated
+		 * in our buffer
+		 */
+		if (slot_count == 0)
+			break;
+		slot_count--;
+
+		/* make sure userland has enough room in their buffer */
+		if (space_args.total_spaces >= space_args.space_slots)
+			break;
+
+		space.flags = info->flags;
+		space.total_bytes = info->total_bytes;
+		space.used_bytes = info->bytes_used;
+		memcpy(dest, &space, sizeof(space));
+		dest++;
+		space_args.total_spaces++;
+	}
+	rcu_read_unlock();
+
+	user_dest = (struct btrfs_ioctl_space_info *)
+		(arg + sizeof(struct btrfs_ioctl_space_args));
+
+	if (copy_to_user(user_dest, dest_orig, alloc_size))
+		ret = -EFAULT;
+
+	kfree(dest_orig);
+out:
+	if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args)))
+		ret = -EFAULT;
+
+	return ret;
+}
+
 /*
  * there are many ways the trans_start and trans_end ioctls can lead
  * to deadlocks. They should only be used by applications that
@@ -1320,8 +1984,12 @@ long btrfs_ioctl(struct file *file, unsigned int
 		return btrfs_ioctl_snap_create(file, argp, 1);
 	case BTRFS_IOC_SNAP_DESTROY:
 		return btrfs_ioctl_snap_destroy(file, argp);
+	case BTRFS_IOC_DEFAULT_SUBVOL:
+		return btrfs_ioctl_default_subvol(file, argp);
 	case BTRFS_IOC_DEFRAG:
-		return btrfs_ioctl_defrag(file);
+		return btrfs_ioctl_defrag(file, NULL);
+	case BTRFS_IOC_DEFRAG_RANGE:
+		return btrfs_ioctl_defrag(file, argp);
 	case BTRFS_IOC_RESIZE:
 		return btrfs_ioctl_resize(root, argp);
 	case BTRFS_IOC_ADD_DEV:
@@ -1338,6 +2006,12 @@ long btrfs_ioctl(struct file *file, unsigned int
 		return btrfs_ioctl_trans_start(file);
 	case BTRFS_IOC_TRANS_END:
 		return btrfs_ioctl_trans_end(file);
+	case BTRFS_IOC_TREE_SEARCH:
+		return btrfs_ioctl_tree_search(file, argp);
+	case BTRFS_IOC_INO_LOOKUP:
+		return btrfs_ioctl_ino_lookup(file, argp);
+	case BTRFS_IOC_SPACE_INFO:
+		return btrfs_ioctl_space_info(root, argp);
 	case BTRFS_IOC_SYNC:
 		btrfs_sync_fs(file->f_dentry->d_sb, 1);
 		return 0;
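
The last two hunks wire the new request codes into btrfs_ioctl(), with the plain BTRFS_IOC_DEFRAG and the new BTRFS_IOC_DEFRAG_RANGE both landing in btrfs_ioctl_defrag(). Below is a minimal userspace sketch of driving the new range interface. The field names (start, len, flags, extent_thresh) and the BTRFS_DEFRAG_RANGE_COMPRESS / BTRFS_DEFRAG_RANGE_START_IO flags appear in the hunks above; the "ioctl.h" header path, the exact struct layout, and the ioctl request number are assumptions that would normally come from the fs/btrfs/ioctl.h change accompanying this patch.

/*
 * Minimal userspace sketch: compress-and-defragment one file through the
 * new BTRFS_IOC_DEFRAG_RANGE interface. Assumes a copy of the patched
 * fs/btrfs/ioctl.h is available locally as "ioctl.h"; only the field and
 * flag names below are taken from the diff itself.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/types.h>

#include "ioctl.h"	/* assumed copy of the patched fs/btrfs/ioctl.h */

int main(int argc, char **argv)
{
	struct btrfs_ioctl_defrag_range_args range;
	int fd, ret;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&range, 0, sizeof(range));
	range.start = 0;
	range.len = (__u64)-1;		/* whole file, mirroring the kernel's default */
	range.extent_thresh = 0;	/* 0 = kernel default (256k in should_defrag_range) */
	range.flags = BTRFS_DEFRAG_RANGE_COMPRESS;	/* the kernel adds START_IO itself */

	ret = ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &range);
	if (ret < 0)
		perror("BTRFS_IOC_DEFRAG_RANGE");

	close(fd);
	return ret < 0;
}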