author		Chris Mason <chris.mason@oracle.com>	2012-01-16 15:26:17 -0500
committer	Chris Mason <chris.mason@oracle.com>	2012-01-16 15:26:17 -0500
commit		d756bd2d9339447c29bde950910586df8f8941ec (patch)
tree		f96aeb682bcc4fdcf75d080f260c809b9fbc4a1a
parent		27263e28321db438bc43dc0c0be432ce91526224 (diff)
parent		b367e47fb3a70f5d24ebd6faf7d42436d485fb2d (diff)

Merge branch 'for-chris' of git://repo.or.cz/linux-btrfs-devel into integration
Conflicts:
fs/btrfs/volumes.c
Signed-off-by: Chris Mason <chris.mason@oracle.com>
-rw-r--r--	fs/btrfs/disk-io.c		|   2
-rw-r--r--	fs/btrfs/extent-tree.c		|   3
-rw-r--r--	fs/btrfs/free-space-cache.c	| 293
-rw-r--r--	fs/btrfs/ioctl.c		|  20
-rw-r--r--	fs/btrfs/volumes.c		| 185
-rw-r--r--	fs/btrfs/volumes.h		|   3
6 files changed, 280 insertions, 226 deletions
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 9c1a744e595b..e5167219c266 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2278,9 +2278,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 		   (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
 		   BTRFS_UUID_SIZE);

-	mutex_lock(&fs_info->chunk_mutex);
 	ret = btrfs_read_chunk_tree(chunk_root);
-	mutex_unlock(&fs_info->chunk_mutex);
 	if (ret) {
 		printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
 		       sb->s_id);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 352083ad233c..1c1cf216be80 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -7143,7 +7143,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
 		 * space to fit our block group in.
 		 */
 		if (device->total_bytes > device->bytes_used + min_free) {
-			ret = find_free_dev_extent(NULL, device, min_free,
+			ret = find_free_dev_extent(device, min_free,
						   &dev_offset, NULL);
 			if (!ret)
 				dev_nr++;
@@ -7505,6 +7505,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
 				&cache->space_info);
 	BUG_ON(ret);
+	update_global_block_rsv(root->fs_info);

 	spin_lock(&cache->space_info->lock);
 	cache->space_info->bytes_readonly += cache->bytes_super;
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index ce40db59c706..6c7887a7770c 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -319,9 +319,11 @@ static void io_ctl_drop_pages(struct io_ctl *io_ctl)
 		io_ctl_unmap_page(io_ctl);

 	for (i = 0; i < io_ctl->num_pages; i++) {
-		ClearPageChecked(io_ctl->pages[i]);
-		unlock_page(io_ctl->pages[i]);
-		page_cache_release(io_ctl->pages[i]);
+		if (io_ctl->pages[i]) {
+			ClearPageChecked(io_ctl->pages[i]);
+			unlock_page(io_ctl->pages[i]);
+			page_cache_release(io_ctl->pages[i]);
+		}
 	}
 }

@@ -635,7 +637,10 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 	if (!num_entries)
 		return 0;

-	io_ctl_init(&io_ctl, inode, root);
+	ret = io_ctl_init(&io_ctl, inode, root);
+	if (ret)
+		return ret;
+
 	ret = readahead_cache(inode);
 	if (ret)
 		goto out;
@@ -838,7 +843,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	struct io_ctl io_ctl;
 	struct list_head bitmap_list;
 	struct btrfs_key key;
-	u64 start, end, len;
+	u64 start, extent_start, extent_end, len;
 	int entries = 0;
 	int bitmaps = 0;
 	int ret;
@@ -849,7 +854,9 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	if (!i_size_read(inode))
 		return -1;

-	io_ctl_init(&io_ctl, inode, root);
+	ret = io_ctl_init(&io_ctl, inode, root);
+	if (ret)
+		return -1;

 	/* Get the cluster for this block_group if it exists */
 	if (block_group && !list_empty(&block_group->cluster_list))
@@ -857,25 +864,12 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 					     struct btrfs_free_cluster,
 					     block_group_list);

-	/*
-	 * We shouldn't have switched the pinned extents yet so this is the
-	 * right one
-	 */
-	unpin = root->fs_info->pinned_extents;
-
 	/* Lock all pages first so we can lock the extent safely. */
 	io_ctl_prepare_pages(&io_ctl, inode, 0);

 	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 0, &cached_state, GFP_NOFS);

-	/*
-	 * When searching for pinned extents, we need to start at our start
-	 * offset.
-	 */
-	if (block_group)
-		start = block_group->key.objectid;
-
 	node = rb_first(&ctl->free_space_offset);
 	if (!node && cluster) {
 		node = rb_first(&cluster->root);
@@ -918,9 +912,20 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
	 * We want to add any pinned extents to our free space cache
	 * so we don't leak the space
	 */
+
+	/*
+	 * We shouldn't have switched the pinned extents yet so this is the
+	 * right one
+	 */
+	unpin = root->fs_info->pinned_extents;
+
+	if (block_group)
+		start = block_group->key.objectid;
+
 	while (block_group && (start < block_group->key.objectid +
			       block_group->key.offset)) {
-		ret = find_first_extent_bit(unpin, start, &start, &end,
+		ret = find_first_extent_bit(unpin, start,
+					    &extent_start, &extent_end,
					    EXTENT_DIRTY);
 		if (ret) {
 			ret = 0;
@@ -928,20 +933,21 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 		}

 		/* This pinned extent is out of our range */
-		if (start >= block_group->key.objectid +
+		if (extent_start >= block_group->key.objectid +
		    block_group->key.offset)
 			break;

-		len = block_group->key.objectid +
-			block_group->key.offset - start;
-		len = min(len, end + 1 - start);
+		extent_start = max(extent_start, start);
+		extent_end = min(block_group->key.objectid +
+				 block_group->key.offset, extent_end + 1);
+		len = extent_end - extent_start;

 		entries++;
-		ret = io_ctl_add_entry(&io_ctl, start, len, NULL);
+		ret = io_ctl_add_entry(&io_ctl, extent_start, len, NULL);
 		if (ret)
 			goto out_nospc;

-		start = end + 1;
+		start = extent_end;
 	}

 	/* Write out the bitmaps */
@@ -2574,17 +2580,57 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
 	cluster->block_group = NULL;
 }

-int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
-			   u64 *trimmed, u64 start, u64 end, u64 minlen)
+static int do_trimming(struct btrfs_block_group_cache *block_group,
+		       u64 *total_trimmed, u64 start, u64 bytes,
+		       u64 reserved_start, u64 reserved_bytes)
 {
-	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
-	struct btrfs_free_space *entry = NULL;
+	struct btrfs_space_info *space_info = block_group->space_info;
 	struct btrfs_fs_info *fs_info = block_group->fs_info;
-	u64 bytes = 0;
-	u64 actually_trimmed;
-	int ret = 0;
+	int ret;
+	int update = 0;
+	u64 trimmed = 0;

-	*trimmed = 0;
+	spin_lock(&space_info->lock);
+	spin_lock(&block_group->lock);
+	if (!block_group->ro) {
+		block_group->reserved += reserved_bytes;
+		space_info->bytes_reserved += reserved_bytes;
+		update = 1;
+	}
+	spin_unlock(&block_group->lock);
+	spin_unlock(&space_info->lock);
+
+	ret = btrfs_error_discard_extent(fs_info->extent_root,
+					 start, bytes, &trimmed);
+	if (!ret)
+		*total_trimmed += trimmed;
+
+	btrfs_add_free_space(block_group, reserved_start, reserved_bytes);
+
+	if (update) {
+		spin_lock(&space_info->lock);
+		spin_lock(&block_group->lock);
+		if (block_group->ro)
+			space_info->bytes_readonly += reserved_bytes;
+		block_group->reserved -= reserved_bytes;
+		space_info->bytes_reserved -= reserved_bytes;
+		spin_unlock(&space_info->lock);
+		spin_unlock(&block_group->lock);
+	}
+
+	return ret;
+}
+
+static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
+			  u64 *total_trimmed, u64 start, u64 end, u64 minlen)
+{
+	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+	struct btrfs_free_space *entry;
+	struct rb_node *node;
+	int ret = 0;
+	u64 extent_start;
+	u64 extent_bytes;
+	u64 bytes;

 	while (start < end) {
 		spin_lock(&ctl->tree_lock);
@@ -2595,81 +2641,118 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
 		}

 		entry = tree_search_offset(ctl, start, 0, 1);
-		if (!entry)
-			entry = tree_search_offset(ctl,
-						   offset_to_bitmap(ctl, start),
-						   1, 1);
-
-		if (!entry || entry->offset >= end) {
+		if (!entry) {
 			spin_unlock(&ctl->tree_lock);
 			break;
 		}

-		if (entry->bitmap) {
-			ret = search_bitmap(ctl, entry, &start, &bytes);
-			if (!ret) {
-				if (start >= end) {
-					spin_unlock(&ctl->tree_lock);
-					break;
-				}
-				bytes = min(bytes, end - start);
-				bitmap_clear_bits(ctl, entry, start, bytes);
-				if (entry->bytes == 0)
-					free_bitmap(ctl, entry);
-			} else {
-				start = entry->offset + BITS_PER_BITMAP *
-					block_group->sectorsize;
+		/* skip bitmaps */
+		while (entry->bitmap) {
+			node = rb_next(&entry->offset_index);
+			if (!node) {
 				spin_unlock(&ctl->tree_lock);
-				ret = 0;
-				continue;
+				goto out;
 			}
-		} else {
-			start = entry->offset;
-			bytes = min(entry->bytes, end - start);
-			unlink_free_space(ctl, entry);
-			kmem_cache_free(btrfs_free_space_cachep, entry);
+			entry = rb_entry(node, struct btrfs_free_space,
+					 offset_index);
+		}
+
+		if (entry->offset >= end) {
+			spin_unlock(&ctl->tree_lock);
+			break;
 		}

+		extent_start = entry->offset;
+		extent_bytes = entry->bytes;
+		start = max(start, extent_start);
+		bytes = min(extent_start + extent_bytes, end) - start;
+		if (bytes < minlen) {
+			spin_unlock(&ctl->tree_lock);
+			goto next;
+		}
+
+		unlink_free_space(ctl, entry);
+		kmem_cache_free(btrfs_free_space_cachep, entry);
+
 		spin_unlock(&ctl->tree_lock);

-		if (bytes >= minlen) {
-			struct btrfs_space_info *space_info;
-			int update = 0;
-
-			space_info = block_group->space_info;
-			spin_lock(&space_info->lock);
-			spin_lock(&block_group->lock);
-			if (!block_group->ro) {
-				block_group->reserved += bytes;
-				space_info->bytes_reserved += bytes;
-				update = 1;
-			}
-			spin_unlock(&block_group->lock);
-			spin_unlock(&space_info->lock);
-
-			ret = btrfs_error_discard_extent(fs_info->extent_root,
-							 start,
-							 bytes,
-							 &actually_trimmed);
-
-			btrfs_add_free_space(block_group, start, bytes);
-			if (update) {
-				spin_lock(&space_info->lock);
-				spin_lock(&block_group->lock);
-				if (block_group->ro)
-					space_info->bytes_readonly += bytes;
-				block_group->reserved -= bytes;
-				space_info->bytes_reserved -= bytes;
-				spin_unlock(&space_info->lock);
-				spin_unlock(&block_group->lock);
-			}
+		ret = do_trimming(block_group, total_trimmed, start, bytes,
+				  extent_start, extent_bytes);
+		if (ret)
+			break;
+next:
+		start += bytes;

-			if (ret)
-				break;
-			*trimmed += actually_trimmed;
+		if (fatal_signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		cond_resched();
+	}
+out:
+	return ret;
+}
+
+static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
+			u64 *total_trimmed, u64 start, u64 end, u64 minlen)
+{
+	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+	struct btrfs_free_space *entry;
+	int ret = 0;
+	int ret2;
+	u64 bytes;
+	u64 offset = offset_to_bitmap(ctl, start);
+
+	while (offset < end) {
+		bool next_bitmap = false;
+
+		spin_lock(&ctl->tree_lock);
+
+		if (ctl->free_space < minlen) {
+			spin_unlock(&ctl->tree_lock);
+			break;
+		}
+
+		entry = tree_search_offset(ctl, offset, 1, 0);
+		if (!entry) {
+			spin_unlock(&ctl->tree_lock);
+			next_bitmap = true;
+			goto next;
+		}
+
+		bytes = minlen;
+		ret2 = search_bitmap(ctl, entry, &start, &bytes);
+		if (ret2 || start >= end) {
+			spin_unlock(&ctl->tree_lock);
+			next_bitmap = true;
+			goto next;
+		}
+
+		bytes = min(bytes, end - start);
+		if (bytes < minlen) {
+			spin_unlock(&ctl->tree_lock);
+			goto next;
+		}
+
+		bitmap_clear_bits(ctl, entry, start, bytes);
+		if (entry->bytes == 0)
+			free_bitmap(ctl, entry);
+
+		spin_unlock(&ctl->tree_lock);
+
+		ret = do_trimming(block_group, total_trimmed, start, bytes,
+				  start, bytes);
+		if (ret)
+			break;
+next:
+		if (next_bitmap) {
+			offset += BITS_PER_BITMAP * ctl->unit;
+		} else {
+			start += bytes;
+			if (start >= offset + BITS_PER_BITMAP * ctl->unit)
+				offset += BITS_PER_BITMAP * ctl->unit;
 		}
-		start += bytes;
-		bytes = 0;

 		if (fatal_signal_pending(current)) {
 			ret = -ERESTARTSYS;
@@ -2682,6 +2765,22 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
 	return ret;
 }

+int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
+			   u64 *trimmed, u64 start, u64 end, u64 minlen)
+{
+	int ret;
+
+	*trimmed = 0;
+
+	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
+	if (ret)
+		return ret;
+
+	ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
+
+	return ret;
+}
+
 /*
  * Find the left-most item in the cache tree, and then return the
  * smallest inode number in the item.
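
The reworked pinned-extent loop in __btrfs_write_out_cache() above no longer assumes the range returned by find_first_extent_bit() lies inside the block group; it clips [extent_start, extent_end] against the group and the current cursor before logging it, then resumes the scan at extent_end. That clamping arithmetic can be checked on its own. The standalone sketch below uses made-up block-group and extent values, not data from the commit.

/* Standalone sketch of the clamping done in the pinned-extent loop:
 * clip [extent_start, extent_end] (inclusive end, hence the "+ 1")
 * against the block group and the cursor, then advance the cursor.
 * Compile with: cc -o clamp clamp.c
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }
static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	/* hypothetical block group: objectid = start, offset = length */
	uint64_t bg_start = 1024 * 1024;
	uint64_t bg_len   = 256 * 1024;
	uint64_t start    = bg_start;		/* cursor, as in the kernel loop */

	/* hypothetical pinned extent straddling the group's start */
	uint64_t extent_start = bg_start - 4096;
	uint64_t extent_end   = bg_start + 8191;

	extent_start = max_u64(extent_start, start);
	extent_end   = min_u64(bg_start + bg_len, extent_end + 1);
	uint64_t len = extent_end - extent_start;

	printf("clipped extent: start=%llu len=%llu\n",
	       (unsigned long long)extent_start, (unsigned long long)len);

	start = extent_end;			/* next iteration resumes here */
	printf("cursor advances to %llu\n", (unsigned long long)start);
	return 0;
}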
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 1e7a9bac31ab..ef909b5d3d2e 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -176,6 +176,8 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
 	struct btrfs_trans_handle *trans;
 	unsigned int flags, oldflags;
 	int ret;
+	u64 ip_oldflags;
+	unsigned int i_oldflags;

 	if (btrfs_root_readonly(root))
 		return -EROFS;
@@ -192,6 +194,9 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)

 	mutex_lock(&inode->i_mutex);

+	ip_oldflags = ip->flags;
+	i_oldflags = inode->i_flags;
+
 	flags = btrfs_mask_flags(inode->i_mode, flags);
 	oldflags = btrfs_flags_to_ioctl(ip->flags);
 	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
@@ -249,19 +254,24 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
 		ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
 	}

-	trans = btrfs_join_transaction(root);
-	BUG_ON(IS_ERR(trans));
+	trans = btrfs_start_transaction(root, 1);
+	if (IS_ERR(trans)) {
+		ret = PTR_ERR(trans);
+		goto out_drop;
+	}

 	btrfs_update_iflags(inode);
 	inode->i_ctime = CURRENT_TIME;
 	ret = btrfs_update_inode(trans, root, inode);
-	BUG_ON(ret);

 	btrfs_end_transaction(trans, root);
+out_drop:
+	if (ret) {
+		ip->flags = ip_oldflags;
+		inode->i_flags = i_oldflags;
+	}

 	mnt_drop_write(file->f_path.mnt);
-
-	ret = 0;
 out_unlock:
 	mutex_unlock(&inode->i_mutex);
 	return ret;
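
The ioctl.c change above replaces two BUG_ON() calls with a recoverable error path: the old flag values are captured before anything is modified and written back if starting the transaction or updating the inode fails. Below is a minimal userspace sketch of that snapshot-and-rollback pattern; fake_inode, fake_update() and the flag values are illustrative stand-ins, not btrfs interfaces.

/* Snapshot the old flags, attempt the update, and roll back on failure
 * instead of crashing. Compile with: cc -o rollback rollback.c
 */
#include <stdio.h>
#include <errno.h>

struct fake_inode {
	unsigned int ip_flags;	/* stands in for ip->flags */
	unsigned int i_flags;	/* stands in for inode->i_flags */
};

/* pretend persistence step; always fails so the rollback path runs */
static int fake_update(struct fake_inode *ino)
{
	(void)ino;
	return -ENOMEM;
}

static int set_flags(struct fake_inode *ino, unsigned int new_flags)
{
	unsigned int ip_oldflags = ino->ip_flags;
	unsigned int i_oldflags = ino->i_flags;
	int ret;

	ino->ip_flags = new_flags;
	ino->i_flags = new_flags & 0xff;

	ret = fake_update(ino);
	if (ret) {
		/* restore the snapshot rather than leave half-applied state */
		ino->ip_flags = ip_oldflags;
		ino->i_flags = i_oldflags;
	}
	return ret;
}

int main(void)
{
	struct fake_inode ino = { .ip_flags = 0x1, .i_flags = 0x1 };
	int ret = set_flags(&ino, 0xdead);

	printf("ret=%d ip_flags=0x%x i_flags=0x%x\n",
	       ret, ino.ip_flags, ino.i_flags);
	return 0;
}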
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 9489a2aca47b..e0b7bb92a170 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -830,7 +830,6 @@ out:

 /*
  * find_free_dev_extent - find free space in the specified device
- * @trans:	transaction handler
  * @device:	the device which we search the free space in
  * @num_bytes:	the size of the free space that we need
  * @start:	store the start of the free space.
@@ -849,8 +848,7 @@ out:
  * But if we don't find suitable free space, it is used to store the size of
  * the max free space.
  */
-int find_free_dev_extent(struct btrfs_trans_handle *trans,
-			 struct btrfs_device *device, u64 num_bytes,
+int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
 {
 	struct btrfs_key key;
@@ -894,7 +892,7 @@ int find_free_dev_extent(struct btrfs_trans_handle *trans,
 	key.offset = search_start;
 	key.type = BTRFS_DEV_EXTENT_KEY;

-	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 	if (ret < 0)
 		goto out;
 	if (ret > 0) {
@@ -1468,8 +1466,7 @@ error_undo:
 /*
  * does all the dirty work required for changing file system's UUID.
  */
-static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
-				struct btrfs_root *root)
+static int btrfs_prepare_sprout(struct btrfs_root *root)
 {
 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
 	struct btrfs_fs_devices *old_devices;
@@ -1693,7 +1690,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)

 	if (seeding_dev) {
 		sb->s_flags &= ~MS_RDONLY;
-		ret = btrfs_prepare_sprout(trans, root);
+		ret = btrfs_prepare_sprout(root);
 		BUG_ON(ret);
 	}

@@ -3044,8 +3041,7 @@ done:
 	return ret;
 }

-static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
-				  struct btrfs_root *root,
+static int btrfs_add_system_chunk(struct btrfs_root *root,
				  struct btrfs_key *key,
				  struct btrfs_chunk *chunk, int item_size)
 {
@@ -3221,7 +3217,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 		if (total_avail == 0)
 			continue;

-		ret = find_free_dev_extent(trans, device,
+		ret = find_free_dev_extent(device,
					   max_stripe_size * dev_stripes,
					   &dev_offset, &max_avail);
 		if (ret && ret != -ENOSPC)
@@ -3412,7 +3408,7 @@ static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
 	BUG_ON(ret);

 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
-		ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
+		ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
					     item_size);
 		BUG_ON(ret);
 	}
@@ -3624,26 +3620,13 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 	u64 stripe_nr;
 	u64 stripe_nr_orig;
 	u64 stripe_nr_end;
-	int stripes_allocated = 8;
-	int stripes_required = 1;
 	int stripe_index;
 	int i;
+	int ret = 0;
 	int num_stripes;
 	int max_errors = 0;
 	struct btrfs_bio *bbio = NULL;

-	if (bbio_ret && !(rw & (REQ_WRITE | REQ_DISCARD)))
-		stripes_allocated = 1;
-again:
-	if (bbio_ret) {
-		bbio = kzalloc(btrfs_bio_size(stripes_allocated),
-				GFP_NOFS);
-		if (!bbio)
-			return -ENOMEM;
-
-		atomic_set(&bbio->error, 0);
-	}
-
 	read_lock(&em_tree->lock);
 	em = lookup_extent_mapping(em_tree, logical, *length);
 	read_unlock(&em_tree->lock);
@@ -3662,28 +3645,6 @@ again:
 	if (mirror_num > map->num_stripes)
 		mirror_num = 0;

-	/* if our btrfs_bio struct is too small, back off and try again */
-	if (rw & REQ_WRITE) {
-		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
-				 BTRFS_BLOCK_GROUP_DUP)) {
-			stripes_required = map->num_stripes;
-			max_errors = 1;
-		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
-			stripes_required = map->sub_stripes;
-			max_errors = 1;
-		}
-	}
-	if (rw & REQ_DISCARD) {
-		if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK)
-			stripes_required = map->num_stripes;
-	}
-	if (bbio_ret && (rw & (REQ_WRITE | REQ_DISCARD)) &&
-	    stripes_allocated < stripes_required) {
-		stripes_allocated = map->num_stripes;
-		free_extent_map(em);
-		kfree(bbio);
-		goto again;
-	}
 	stripe_nr = offset;
 	/*
	 * stripe_nr counts the total number of stripes we have to stride
@@ -3775,81 +3736,55 @@ again:
 	}
 	BUG_ON(stripe_index >= map->num_stripes);

+	bbio = kzalloc(btrfs_bio_size(num_stripes), GFP_NOFS);
+	if (!bbio) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	atomic_set(&bbio->error, 0);
+
 	if (rw & REQ_DISCARD) {
+		int factor = 0;
+		int sub_stripes = 0;
+		u64 stripes_per_dev = 0;
+		u32 remaining_stripes = 0;
+
+		if (map->type &
+		    (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
+			if (map->type & BTRFS_BLOCK_GROUP_RAID0)
+				sub_stripes = 1;
+			else
+				sub_stripes = map->sub_stripes;
+
+			factor = map->num_stripes / sub_stripes;
+			stripes_per_dev = div_u64_rem(stripe_nr_end -
+						      stripe_nr_orig,
+						      factor,
+						      &remaining_stripes);
+		}
+
 		for (i = 0; i < num_stripes; i++) {
 			bbio->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
 			bbio->stripes[i].dev = map->stripes[stripe_index].dev;

-			if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
-				u64 stripes;
-				u32 last_stripe = 0;
-				int j;
-
-				div_u64_rem(stripe_nr_end - 1,
-					    map->num_stripes,
-					    &last_stripe);
-
-				for (j = 0; j < map->num_stripes; j++) {
-					u32 test;
-
-					div_u64_rem(stripe_nr_end - 1 - j,
-						    map->num_stripes, &test);
-					if (test == stripe_index)
-						break;
-				}
-				stripes = stripe_nr_end - 1 - j;
-				do_div(stripes, map->num_stripes);
-				bbio->stripes[i].length = map->stripe_len *
-					(stripes - stripe_nr + 1);
-
-				if (i == 0) {
-					bbio->stripes[i].length -=
-						stripe_offset;
-					stripe_offset = 0;
-				}
-				if (stripe_index == last_stripe)
-					bbio->stripes[i].length -=
-						stripe_end_offset;
-			} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
-				u64 stripes;
-				int j;
-				int factor = map->num_stripes /
-					     map->sub_stripes;
-				u32 last_stripe = 0;
-
-				div_u64_rem(stripe_nr_end - 1,
-					    factor, &last_stripe);
-				last_stripe *= map->sub_stripes;
-
-				for (j = 0; j < factor; j++) {
-					u32 test;
-
-					div_u64_rem(stripe_nr_end - 1 - j,
-						    factor, &test);
-
-					if (test ==
-					    stripe_index / map->sub_stripes)
-						break;
-				}
-				stripes = stripe_nr_end - 1 - j;
-				do_div(stripes, factor);
-				bbio->stripes[i].length = map->stripe_len *
-					(stripes - stripe_nr + 1);
-
-				if (i < map->sub_stripes) {
+			if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
+					 BTRFS_BLOCK_GROUP_RAID10)) {
+				bbio->stripes[i].length = stripes_per_dev *
+					map->stripe_len;
+				if (i / sub_stripes < remaining_stripes)
+					bbio->stripes[i].length +=
+						map->stripe_len;
+				if (i < sub_stripes)
					bbio->stripes[i].length -=
						stripe_offset;
-					if (i == map->sub_stripes - 1)
-						stripe_offset = 0;
-				}
-				if (stripe_index >= last_stripe &&
-				    stripe_index <= (last_stripe +
-						     map->sub_stripes - 1)) {
+				if ((i / sub_stripes + 1) %
+				    sub_stripes == remaining_stripes)
					bbio->stripes[i].length -=
						stripe_end_offset;
-				}
+				if (i == sub_stripes - 1)
+					stripe_offset = 0;
 			} else
				bbio->stripes[i].length = *length;

@@ -3871,15 +3806,22 @@ again:
 			stripe_index++;
 		}
 	}
-	if (bbio_ret) {
-		*bbio_ret = bbio;
-		bbio->num_stripes = num_stripes;
-		bbio->max_errors = max_errors;
-		bbio->mirror_num = mirror_num;
+
+	if (rw & REQ_WRITE) {
+		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
+				 BTRFS_BLOCK_GROUP_RAID10 |
+				 BTRFS_BLOCK_GROUP_DUP)) {
+			max_errors = 1;
+		}
 	}
+
+	*bbio_ret = bbio;
+	bbio->num_stripes = num_stripes;
+	bbio->max_errors = max_errors;
+	bbio->mirror_num = mirror_num;
 out:
 	free_extent_map(em);
-	return 0;
+	return ret;
 }

 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
@@ -4284,7 +4226,7 @@ static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
 	struct btrfs_fs_devices *fs_devices;
 	int ret;

-	mutex_lock(&uuid_mutex);
+	BUG_ON(!mutex_is_locked(&uuid_mutex));

 	fs_devices = root->fs_info->fs_devices->seed;
 	while (fs_devices) {
@@ -4322,7 +4264,6 @@ static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
 	fs_devices->seed = root->fs_info->fs_devices->seed;
 	root->fs_info->fs_devices->seed = fs_devices;
 out:
-	mutex_unlock(&uuid_mutex);
 	return ret;
 }

@@ -4465,6 +4406,9 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
 	if (!path)
 		return -ENOMEM;

+	mutex_lock(&uuid_mutex);
+	lock_chunks(root);
+
 	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are afound
@@ -4515,6 +4459,9 @@ again:
 	}
 	ret = 0;
 error:
+	unlock_chunks(root);
+	mutex_unlock(&uuid_mutex);
+
 	btrfs_free_path(path);
 	return ret;
 }
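
The rewritten REQ_DISCARD branch of __btrfs_map_block() above derives every stripe's discard length from a single division: stripes_per_dev full stripes go to each device, and the first remaining_stripes positions (counted per mirror group of sub_stripes devices) get one extra stripe. The sketch below reproduces only that arithmetic for a hypothetical RAID10 layout; it ignores the partial-stripe corrections (stripe_offset and stripe_end_offset) that the kernel code also applies, and all parameter values are made up.

/* Standalone check of the per-device discard length math.
 * Compile with: cc -o discard_len discard_len.c
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical RAID10 layout: 4 devices, 2 copies per stripe */
	int num_stripes = 4;
	int sub_stripes = 2;		/* would be 1 for RAID0 */
	uint64_t stripe_len = 64 * 1024;

	/* hypothetical discard range expressed in stripe numbers */
	uint64_t stripe_nr_orig = 10;
	uint64_t stripe_nr_end = 17;

	int factor = num_stripes / sub_stripes;
	uint64_t total = stripe_nr_end - stripe_nr_orig;
	uint64_t stripes_per_dev = total / factor;	/* div_u64_rem quotient */
	uint64_t remaining_stripes = total % factor;	/* div_u64_rem remainder */

	for (int i = 0; i < num_stripes; i++) {
		uint64_t len = stripes_per_dev * stripe_len;

		/* the first mirror groups carry one extra stripe each */
		if ((uint64_t)(i / sub_stripes) < remaining_stripes)
			len += stripe_len;

		printf("stripe %d: %llu bytes\n", i, (unsigned long long)len);
	}
	return 0;
}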
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 6faec9dd1f93..19ac95048b88 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -279,7 +279,6 @@ int btrfs_recover_balance(struct btrfs_root *tree_root);
 int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
-int find_free_dev_extent(struct btrfs_trans_handle *trans,
-			 struct btrfs_device *device, u64 num_bytes,
+int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *max_avail);
 #endif