aboutsummaryrefslogtreecommitdiffstats
path: root/fs/btrfs/ioctl.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2012-03-30 15:44:29 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-03-30 15:44:29 -0400
commit9613bebb223dea3179c265dc31e1bb41ae39f321 (patch)
tree39bf883573d23775a53be3172323c0237fef5630 /fs/btrfs/ioctl.c
parent40380f1c7841a5dcbf0b20f0b6da11969211ef77 (diff)
parentbc3f116fec194f1d7329b160c266fe16b9266a1e (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs fixes and features from Chris Mason: "We've merged in the error handling patches from SuSE. These are already shipping in the sles kernel, and they give btrfs the ability to abort transactions and go readonly on errors. It involves a lot of churn as they clarify BUG_ONs, and remove the ones we now properly deal with. Josef reworked the way our metadata interacts with the page cache. page->private now points to the btrfs extent_buffer object, which makes everything faster. He changed it so we write a whole extent buffer at a time instead of allowing individual pages to go down, which will be important for the raid5/6 code (for the 3.5 merge window ;) Josef also made us more aggressive about dropping pages for metadata blocks that were freed due to COW. Overall, our metadata caching is much faster now. We've integrated my patch for metadata bigger than the page size. This allows metadata blocks up to 64KB in size. In practice 16K and 32K seem to work best. For workloads with lots of metadata, this cuts down the size of the extent allocation tree dramatically and fragments much less. Scrub was updated to support the larger block sizes, which ended up being a fairly large change (thanks Stefan Behrens). We also have an assortment of fixes and updates, especially to the balancing code (Ilya Dryomov), the back ref walker (Jan Schmidt) and the defragging code (Liu Bo)." Fixed up trivial conflicts in fs/btrfs/scrub.c that were just due to removal of the second argument to k[un]map_atomic() in commit 7ac687d9e047. 
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (75 commits) Btrfs: update the checks for mixed block groups with big metadata blocks Btrfs: update to the right index of defragment Btrfs: do not bother to defrag an extent if it is a big real extent Btrfs: add a check to decide if we should defrag the range Btrfs: fix recursive defragment with autodefrag option Btrfs: fix the mismatch of page->mapping Btrfs: fix race between direct io and autodefrag Btrfs: fix deadlock during allocating chunks Btrfs: show useful info in space reservation tracepoint Btrfs: don't use crc items bigger than 4KB Btrfs: flush out and clean up any block device pages during mount btrfs: disallow unequal data/metadata blocksize for mixed block groups Btrfs: enhance superblock sanity checks Btrfs: change scrub to support big blocks Btrfs: minor cleanup in scrub Btrfs: introduce common define for max number of mirrors Btrfs: fix infinite loop in btrfs_shrink_device() Btrfs: fix memory leak in resolver code Btrfs: allow dup for data chunks in mixed mode Btrfs: validate target profiles only if we are going to use them ...
Diffstat (limited to 'fs/btrfs/ioctl.c')
-rw-r--r--fs/btrfs/ioctl.c194
1 files changed, 145 insertions, 49 deletions
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index d8b54715c2de..18cc23d164a8 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -425,22 +425,37 @@ static noinline int create_subvol(struct btrfs_root *root,
425 425
426 key.offset = (u64)-1; 426 key.offset = (u64)-1;
427 new_root = btrfs_read_fs_root_no_name(root->fs_info, &key); 427 new_root = btrfs_read_fs_root_no_name(root->fs_info, &key);
428 BUG_ON(IS_ERR(new_root)); 428 if (IS_ERR(new_root)) {
429 btrfs_abort_transaction(trans, root, PTR_ERR(new_root));
430 ret = PTR_ERR(new_root);
431 goto fail;
432 }
429 433
430 btrfs_record_root_in_trans(trans, new_root); 434 btrfs_record_root_in_trans(trans, new_root);
431 435
432 ret = btrfs_create_subvol_root(trans, new_root, new_dirid); 436 ret = btrfs_create_subvol_root(trans, new_root, new_dirid);
437 if (ret) {
438 /* We potentially lose an unused inode item here */
439 btrfs_abort_transaction(trans, root, ret);
440 goto fail;
441 }
442
433 /* 443 /*
434 * insert the directory item 444 * insert the directory item
435 */ 445 */
436 ret = btrfs_set_inode_index(dir, &index); 446 ret = btrfs_set_inode_index(dir, &index);
437 BUG_ON(ret); 447 if (ret) {
448 btrfs_abort_transaction(trans, root, ret);
449 goto fail;
450 }
438 451
439 ret = btrfs_insert_dir_item(trans, root, 452 ret = btrfs_insert_dir_item(trans, root,
440 name, namelen, dir, &key, 453 name, namelen, dir, &key,
441 BTRFS_FT_DIR, index); 454 BTRFS_FT_DIR, index);
442 if (ret) 455 if (ret) {
456 btrfs_abort_transaction(trans, root, ret);
443 goto fail; 457 goto fail;
458 }
444 459
445 btrfs_i_size_write(dir, dir->i_size + namelen * 2); 460 btrfs_i_size_write(dir, dir->i_size + namelen * 2);
446 ret = btrfs_update_inode(trans, root, dir); 461 ret = btrfs_update_inode(trans, root, dir);
@@ -769,6 +784,31 @@ none:
769 return -ENOENT; 784 return -ENOENT;
770} 785}
771 786
787/*
788 * Validaty check of prev em and next em:
789 * 1) no prev/next em
790 * 2) prev/next em is an hole/inline extent
791 */
792static int check_adjacent_extents(struct inode *inode, struct extent_map *em)
793{
794 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
795 struct extent_map *prev = NULL, *next = NULL;
796 int ret = 0;
797
798 read_lock(&em_tree->lock);
799 prev = lookup_extent_mapping(em_tree, em->start - 1, (u64)-1);
800 next = lookup_extent_mapping(em_tree, em->start + em->len, (u64)-1);
801 read_unlock(&em_tree->lock);
802
803 if ((!prev || prev->block_start >= EXTENT_MAP_LAST_BYTE) &&
804 (!next || next->block_start >= EXTENT_MAP_LAST_BYTE))
805 ret = 1;
806 free_extent_map(prev);
807 free_extent_map(next);
808
809 return ret;
810}
811
772static int should_defrag_range(struct inode *inode, u64 start, u64 len, 812static int should_defrag_range(struct inode *inode, u64 start, u64 len,
773 int thresh, u64 *last_len, u64 *skip, 813 int thresh, u64 *last_len, u64 *skip,
774 u64 *defrag_end) 814 u64 *defrag_end)
@@ -797,17 +837,25 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
797 837
798 if (!em) { 838 if (!em) {
799 /* get the big lock and read metadata off disk */ 839 /* get the big lock and read metadata off disk */
800 lock_extent(io_tree, start, start + len - 1, GFP_NOFS); 840 lock_extent(io_tree, start, start + len - 1);
801 em = btrfs_get_extent(inode, NULL, 0, start, len, 0); 841 em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
802 unlock_extent(io_tree, start, start + len - 1, GFP_NOFS); 842 unlock_extent(io_tree, start, start + len - 1);
803 843
804 if (IS_ERR(em)) 844 if (IS_ERR(em))
805 return 0; 845 return 0;
806 } 846 }
807 847
808 /* this will cover holes, and inline extents */ 848 /* this will cover holes, and inline extents */
809 if (em->block_start >= EXTENT_MAP_LAST_BYTE) 849 if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
810 ret = 0; 850 ret = 0;
851 goto out;
852 }
853
854 /* If we have nothing to merge with us, just skip. */
855 if (check_adjacent_extents(inode, em)) {
856 ret = 0;
857 goto out;
858 }
811 859
812 /* 860 /*
813 * we hit a real extent, if it is big don't bother defragging it again 861 * we hit a real extent, if it is big don't bother defragging it again
@@ -815,6 +863,7 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
815 if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh) 863 if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh)
816 ret = 0; 864 ret = 0;
817 865
866out:
818 /* 867 /*
819 * last_len ends up being a counter of how many bytes we've defragged. 868 * last_len ends up being a counter of how many bytes we've defragged.
820 * every time we choose not to defrag an extent, we reset *last_len 869 * every time we choose not to defrag an extent, we reset *last_len
@@ -856,6 +905,7 @@ static int cluster_pages_for_defrag(struct inode *inode,
856 u64 isize = i_size_read(inode); 905 u64 isize = i_size_read(inode);
857 u64 page_start; 906 u64 page_start;
858 u64 page_end; 907 u64 page_end;
908 u64 page_cnt;
859 int ret; 909 int ret;
860 int i; 910 int i;
861 int i_done; 911 int i_done;
@@ -864,19 +914,21 @@ static int cluster_pages_for_defrag(struct inode *inode,
864 struct extent_io_tree *tree; 914 struct extent_io_tree *tree;
865 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); 915 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
866 916
867 if (isize == 0)
868 return 0;
869 file_end = (isize - 1) >> PAGE_CACHE_SHIFT; 917 file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
918 if (!isize || start_index > file_end)
919 return 0;
920
921 page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
870 922
871 ret = btrfs_delalloc_reserve_space(inode, 923 ret = btrfs_delalloc_reserve_space(inode,
872 num_pages << PAGE_CACHE_SHIFT); 924 page_cnt << PAGE_CACHE_SHIFT);
873 if (ret) 925 if (ret)
874 return ret; 926 return ret;
875 i_done = 0; 927 i_done = 0;
876 tree = &BTRFS_I(inode)->io_tree; 928 tree = &BTRFS_I(inode)->io_tree;
877 929
878 /* step one, lock all the pages */ 930 /* step one, lock all the pages */
879 for (i = 0; i < num_pages; i++) { 931 for (i = 0; i < page_cnt; i++) {
880 struct page *page; 932 struct page *page;
881again: 933again:
882 page = find_or_create_page(inode->i_mapping, 934 page = find_or_create_page(inode->i_mapping,
@@ -887,10 +939,10 @@ again:
887 page_start = page_offset(page); 939 page_start = page_offset(page);
888 page_end = page_start + PAGE_CACHE_SIZE - 1; 940 page_end = page_start + PAGE_CACHE_SIZE - 1;
889 while (1) { 941 while (1) {
890 lock_extent(tree, page_start, page_end, GFP_NOFS); 942 lock_extent(tree, page_start, page_end);
891 ordered = btrfs_lookup_ordered_extent(inode, 943 ordered = btrfs_lookup_ordered_extent(inode,
892 page_start); 944 page_start);
893 unlock_extent(tree, page_start, page_end, GFP_NOFS); 945 unlock_extent(tree, page_start, page_end);
894 if (!ordered) 946 if (!ordered)
895 break; 947 break;
896 948
@@ -898,6 +950,15 @@ again:
898 btrfs_start_ordered_extent(inode, ordered, 1); 950 btrfs_start_ordered_extent(inode, ordered, 1);
899 btrfs_put_ordered_extent(ordered); 951 btrfs_put_ordered_extent(ordered);
900 lock_page(page); 952 lock_page(page);
953 /*
954 * we unlocked the page above, so we need check if
955 * it was released or not.
956 */
957 if (page->mapping != inode->i_mapping) {
958 unlock_page(page);
959 page_cache_release(page);
960 goto again;
961 }
901 } 962 }
902 963
903 if (!PageUptodate(page)) { 964 if (!PageUptodate(page)) {
@@ -911,15 +972,6 @@ again:
911 } 972 }
912 } 973 }
913 974
914 isize = i_size_read(inode);
915 file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
916 if (!isize || page->index > file_end) {
917 /* whoops, we blew past eof, skip this page */
918 unlock_page(page);
919 page_cache_release(page);
920 break;
921 }
922
923 if (page->mapping != inode->i_mapping) { 975 if (page->mapping != inode->i_mapping) {
924 unlock_page(page); 976 unlock_page(page);
925 page_cache_release(page); 977 page_cache_release(page);
@@ -946,19 +998,18 @@ again:
946 page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE; 998 page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE;
947 999
948 lock_extent_bits(&BTRFS_I(inode)->io_tree, 1000 lock_extent_bits(&BTRFS_I(inode)->io_tree,
949 page_start, page_end - 1, 0, &cached_state, 1001 page_start, page_end - 1, 0, &cached_state);
950 GFP_NOFS);
951 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, 1002 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
952 page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC | 1003 page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
953 EXTENT_DO_ACCOUNTING, 0, 0, &cached_state, 1004 EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
954 GFP_NOFS); 1005 GFP_NOFS);
955 1006
956 if (i_done != num_pages) { 1007 if (i_done != page_cnt) {
957 spin_lock(&BTRFS_I(inode)->lock); 1008 spin_lock(&BTRFS_I(inode)->lock);
958 BTRFS_I(inode)->outstanding_extents++; 1009 BTRFS_I(inode)->outstanding_extents++;
959 spin_unlock(&BTRFS_I(inode)->lock); 1010 spin_unlock(&BTRFS_I(inode)->lock);
960 btrfs_delalloc_release_space(inode, 1011 btrfs_delalloc_release_space(inode,
961 (num_pages - i_done) << PAGE_CACHE_SHIFT); 1012 (page_cnt - i_done) << PAGE_CACHE_SHIFT);
962 } 1013 }
963 1014
964 1015
@@ -983,7 +1034,7 @@ out:
983 unlock_page(pages[i]); 1034 unlock_page(pages[i]);
984 page_cache_release(pages[i]); 1035 page_cache_release(pages[i]);
985 } 1036 }
986 btrfs_delalloc_release_space(inode, num_pages << PAGE_CACHE_SHIFT); 1037 btrfs_delalloc_release_space(inode, page_cnt << PAGE_CACHE_SHIFT);
987 return ret; 1038 return ret;
988 1039
989} 1040}
@@ -1089,12 +1140,9 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
1089 if (!(inode->i_sb->s_flags & MS_ACTIVE)) 1140 if (!(inode->i_sb->s_flags & MS_ACTIVE))
1090 break; 1141 break;
1091 1142
1092 if (!newer_than && 1143 if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
1093 !should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, 1144 PAGE_CACHE_SIZE, extent_thresh,
1094 PAGE_CACHE_SIZE, 1145 &last_len, &skip, &defrag_end)) {
1095 extent_thresh,
1096 &last_len, &skip,
1097 &defrag_end)) {
1098 unsigned long next; 1146 unsigned long next;
1099 /* 1147 /*
1100 * the should_defrag function tells us how much to skip 1148 * the should_defrag function tells us how much to skip
@@ -1123,17 +1171,24 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
1123 ra_index += max_cluster; 1171 ra_index += max_cluster;
1124 } 1172 }
1125 1173
1174 mutex_lock(&inode->i_mutex);
1126 ret = cluster_pages_for_defrag(inode, pages, i, cluster); 1175 ret = cluster_pages_for_defrag(inode, pages, i, cluster);
1127 if (ret < 0) 1176 if (ret < 0) {
1177 mutex_unlock(&inode->i_mutex);
1128 goto out_ra; 1178 goto out_ra;
1179 }
1129 1180
1130 defrag_count += ret; 1181 defrag_count += ret;
1131 balance_dirty_pages_ratelimited_nr(inode->i_mapping, ret); 1182 balance_dirty_pages_ratelimited_nr(inode->i_mapping, ret);
1183 mutex_unlock(&inode->i_mutex);
1132 1184
1133 if (newer_than) { 1185 if (newer_than) {
1134 if (newer_off == (u64)-1) 1186 if (newer_off == (u64)-1)
1135 break; 1187 break;
1136 1188
1189 if (ret > 0)
1190 i += ret;
1191
1137 newer_off = max(newer_off + 1, 1192 newer_off = max(newer_off + 1,
1138 (u64)i << PAGE_CACHE_SHIFT); 1193 (u64)i << PAGE_CACHE_SHIFT);
1139 1194
@@ -1966,7 +2021,11 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
1966 dest->root_key.objectid, 2021 dest->root_key.objectid,
1967 dentry->d_name.name, 2022 dentry->d_name.name,
1968 dentry->d_name.len); 2023 dentry->d_name.len);
1969 BUG_ON(ret); 2024 if (ret) {
2025 err = ret;
2026 btrfs_abort_transaction(trans, root, ret);
2027 goto out_end_trans;
2028 }
1970 2029
1971 btrfs_record_root_in_trans(trans, dest); 2030 btrfs_record_root_in_trans(trans, dest);
1972 2031
@@ -1979,11 +2038,16 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
1979 ret = btrfs_insert_orphan_item(trans, 2038 ret = btrfs_insert_orphan_item(trans,
1980 root->fs_info->tree_root, 2039 root->fs_info->tree_root,
1981 dest->root_key.objectid); 2040 dest->root_key.objectid);
1982 BUG_ON(ret); 2041 if (ret) {
2042 btrfs_abort_transaction(trans, root, ret);
2043 err = ret;
2044 goto out_end_trans;
2045 }
1983 } 2046 }
1984 2047out_end_trans:
1985 ret = btrfs_end_transaction(trans, root); 2048 ret = btrfs_end_transaction(trans, root);
1986 BUG_ON(ret); 2049 if (ret && !err)
2050 err = ret;
1987 inode->i_flags |= S_DEAD; 2051 inode->i_flags |= S_DEAD;
1988out_up_write: 2052out_up_write:
1989 up_write(&root->fs_info->subvol_sem); 2053 up_write(&root->fs_info->subvol_sem);
@@ -2326,13 +2390,13 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2326 another, and lock file content */ 2390 another, and lock file content */
2327 while (1) { 2391 while (1) {
2328 struct btrfs_ordered_extent *ordered; 2392 struct btrfs_ordered_extent *ordered;
2329 lock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); 2393 lock_extent(&BTRFS_I(src)->io_tree, off, off+len);
2330 ordered = btrfs_lookup_first_ordered_extent(src, off+len); 2394 ordered = btrfs_lookup_first_ordered_extent(src, off+len);
2331 if (!ordered && 2395 if (!ordered &&
2332 !test_range_bit(&BTRFS_I(src)->io_tree, off, off+len, 2396 !test_range_bit(&BTRFS_I(src)->io_tree, off, off+len,
2333 EXTENT_DELALLOC, 0, NULL)) 2397 EXTENT_DELALLOC, 0, NULL))
2334 break; 2398 break;
2335 unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); 2399 unlock_extent(&BTRFS_I(src)->io_tree, off, off+len);
2336 if (ordered) 2400 if (ordered)
2337 btrfs_put_ordered_extent(ordered); 2401 btrfs_put_ordered_extent(ordered);
2338 btrfs_wait_ordered_range(src, off, len); 2402 btrfs_wait_ordered_range(src, off, len);
@@ -2447,11 +2511,21 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2447 new_key.offset, 2511 new_key.offset,
2448 new_key.offset + datal, 2512 new_key.offset + datal,
2449 &hint_byte, 1); 2513 &hint_byte, 1);
2450 BUG_ON(ret); 2514 if (ret) {
2515 btrfs_abort_transaction(trans, root,
2516 ret);
2517 btrfs_end_transaction(trans, root);
2518 goto out;
2519 }
2451 2520
2452 ret = btrfs_insert_empty_item(trans, root, path, 2521 ret = btrfs_insert_empty_item(trans, root, path,
2453 &new_key, size); 2522 &new_key, size);
2454 BUG_ON(ret); 2523 if (ret) {
2524 btrfs_abort_transaction(trans, root,
2525 ret);
2526 btrfs_end_transaction(trans, root);
2527 goto out;
2528 }
2455 2529
2456 leaf = path->nodes[0]; 2530 leaf = path->nodes[0];
2457 slot = path->slots[0]; 2531 slot = path->slots[0];
@@ -2478,7 +2552,15 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2478 btrfs_ino(inode), 2552 btrfs_ino(inode),
2479 new_key.offset - datao, 2553 new_key.offset - datao,
2480 0); 2554 0);
2481 BUG_ON(ret); 2555 if (ret) {
2556 btrfs_abort_transaction(trans,
2557 root,
2558 ret);
2559 btrfs_end_transaction(trans,
2560 root);
2561 goto out;
2562
2563 }
2482 } 2564 }
2483 } else if (type == BTRFS_FILE_EXTENT_INLINE) { 2565 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
2484 u64 skip = 0; 2566 u64 skip = 0;
@@ -2503,11 +2585,21 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2503 new_key.offset, 2585 new_key.offset,
2504 new_key.offset + datal, 2586 new_key.offset + datal,
2505 &hint_byte, 1); 2587 &hint_byte, 1);
2506 BUG_ON(ret); 2588 if (ret) {
2589 btrfs_abort_transaction(trans, root,
2590 ret);
2591 btrfs_end_transaction(trans, root);
2592 goto out;
2593 }
2507 2594
2508 ret = btrfs_insert_empty_item(trans, root, path, 2595 ret = btrfs_insert_empty_item(trans, root, path,
2509 &new_key, size); 2596 &new_key, size);
2510 BUG_ON(ret); 2597 if (ret) {
2598 btrfs_abort_transaction(trans, root,
2599 ret);
2600 btrfs_end_transaction(trans, root);
2601 goto out;
2602 }
2511 2603
2512 if (skip) { 2604 if (skip) {
2513 u32 start = 2605 u32 start =
@@ -2541,8 +2633,12 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2541 btrfs_i_size_write(inode, endoff); 2633 btrfs_i_size_write(inode, endoff);
2542 2634
2543 ret = btrfs_update_inode(trans, root, inode); 2635 ret = btrfs_update_inode(trans, root, inode);
2544 BUG_ON(ret); 2636 if (ret) {
2545 btrfs_end_transaction(trans, root); 2637 btrfs_abort_transaction(trans, root, ret);
2638 btrfs_end_transaction(trans, root);
2639 goto out;
2640 }
2641 ret = btrfs_end_transaction(trans, root);
2546 } 2642 }
2547next: 2643next:
2548 btrfs_release_path(path); 2644 btrfs_release_path(path);
@@ -2551,7 +2647,7 @@ next:
2551 ret = 0; 2647 ret = 0;
2552out: 2648out:
2553 btrfs_release_path(path); 2649 btrfs_release_path(path);
2554 unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); 2650 unlock_extent(&BTRFS_I(src)->io_tree, off, off+len);
2555out_unlock: 2651out_unlock:
2556 mutex_unlock(&src->i_mutex); 2652 mutex_unlock(&src->i_mutex);
2557 mutex_unlock(&inode->i_mutex); 2653 mutex_unlock(&inode->i_mutex);
@@ -3066,8 +3162,8 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
3066 goto out; 3162 goto out;
3067 3163
3068 extent_item_pos = loi->logical - key.objectid; 3164 extent_item_pos = loi->logical - key.objectid;
3069 ret = iterate_extent_inodes(root->fs_info, path, key.objectid, 3165 ret = iterate_extent_inodes(root->fs_info, key.objectid,
3070 extent_item_pos, build_ino_list, 3166 extent_item_pos, 0, build_ino_list,
3071 inodes); 3167 inodes);
3072 3168
3073 if (ret < 0) 3169 if (ret < 0)