diff options
author | Chris Mason <chris.mason@oracle.com> | 2012-01-16 15:26:17 -0500 |
---|---|---|
committer | Chris Mason <chris.mason@oracle.com> | 2012-01-16 15:26:17 -0500 |
commit | d756bd2d9339447c29bde950910586df8f8941ec (patch) | |
tree | f96aeb682bcc4fdcf75d080f260c809b9fbc4a1a /fs/btrfs/free-space-cache.c | |
parent | 27263e28321db438bc43dc0c0be432ce91526224 (diff) | |
parent | b367e47fb3a70f5d24ebd6faf7d42436d485fb2d (diff) |
Merge branch 'for-chris' of git://repo.or.cz/linux-btrfs-devel into integration
Conflicts:
fs/btrfs/volumes.c
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/free-space-cache.c')
-rw-r--r-- | fs/btrfs/free-space-cache.c | 293 |
1 file changed, 196 insertions, 97 deletions
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index ce40db59c70..6c7887a7770 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c | |||
@@ -319,9 +319,11 @@ static void io_ctl_drop_pages(struct io_ctl *io_ctl) | |||
319 | io_ctl_unmap_page(io_ctl); | 319 | io_ctl_unmap_page(io_ctl); |
320 | 320 | ||
321 | for (i = 0; i < io_ctl->num_pages; i++) { | 321 | for (i = 0; i < io_ctl->num_pages; i++) { |
322 | ClearPageChecked(io_ctl->pages[i]); | 322 | if (io_ctl->pages[i]) { |
323 | unlock_page(io_ctl->pages[i]); | 323 | ClearPageChecked(io_ctl->pages[i]); |
324 | page_cache_release(io_ctl->pages[i]); | 324 | unlock_page(io_ctl->pages[i]); |
325 | page_cache_release(io_ctl->pages[i]); | ||
326 | } | ||
325 | } | 327 | } |
326 | } | 328 | } |
327 | 329 | ||
@@ -635,7 +637,10 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, | |||
635 | if (!num_entries) | 637 | if (!num_entries) |
636 | return 0; | 638 | return 0; |
637 | 639 | ||
638 | io_ctl_init(&io_ctl, inode, root); | 640 | ret = io_ctl_init(&io_ctl, inode, root); |
641 | if (ret) | ||
642 | return ret; | ||
643 | |||
639 | ret = readahead_cache(inode); | 644 | ret = readahead_cache(inode); |
640 | if (ret) | 645 | if (ret) |
641 | goto out; | 646 | goto out; |
@@ -838,7 +843,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, | |||
838 | struct io_ctl io_ctl; | 843 | struct io_ctl io_ctl; |
839 | struct list_head bitmap_list; | 844 | struct list_head bitmap_list; |
840 | struct btrfs_key key; | 845 | struct btrfs_key key; |
841 | u64 start, end, len; | 846 | u64 start, extent_start, extent_end, len; |
842 | int entries = 0; | 847 | int entries = 0; |
843 | int bitmaps = 0; | 848 | int bitmaps = 0; |
844 | int ret; | 849 | int ret; |
@@ -849,7 +854,9 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, | |||
849 | if (!i_size_read(inode)) | 854 | if (!i_size_read(inode)) |
850 | return -1; | 855 | return -1; |
851 | 856 | ||
852 | io_ctl_init(&io_ctl, inode, root); | 857 | ret = io_ctl_init(&io_ctl, inode, root); |
858 | if (ret) | ||
859 | return -1; | ||
853 | 860 | ||
854 | /* Get the cluster for this block_group if it exists */ | 861 | /* Get the cluster for this block_group if it exists */ |
855 | if (block_group && !list_empty(&block_group->cluster_list)) | 862 | if (block_group && !list_empty(&block_group->cluster_list)) |
@@ -857,25 +864,12 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, | |||
857 | struct btrfs_free_cluster, | 864 | struct btrfs_free_cluster, |
858 | block_group_list); | 865 | block_group_list); |
859 | 866 | ||
860 | /* | ||
861 | * We shouldn't have switched the pinned extents yet so this is the | ||
862 | * right one | ||
863 | */ | ||
864 | unpin = root->fs_info->pinned_extents; | ||
865 | |||
866 | /* Lock all pages first so we can lock the extent safely. */ | 867 | /* Lock all pages first so we can lock the extent safely. */ |
867 | io_ctl_prepare_pages(&io_ctl, inode, 0); | 868 | io_ctl_prepare_pages(&io_ctl, inode, 0); |
868 | 869 | ||
869 | lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, | 870 | lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, |
870 | 0, &cached_state, GFP_NOFS); | 871 | 0, &cached_state, GFP_NOFS); |
871 | 872 | ||
872 | /* | ||
873 | * When searching for pinned extents, we need to start at our start | ||
874 | * offset. | ||
875 | */ | ||
876 | if (block_group) | ||
877 | start = block_group->key.objectid; | ||
878 | |||
879 | node = rb_first(&ctl->free_space_offset); | 873 | node = rb_first(&ctl->free_space_offset); |
880 | if (!node && cluster) { | 874 | if (!node && cluster) { |
881 | node = rb_first(&cluster->root); | 875 | node = rb_first(&cluster->root); |
@@ -918,9 +912,20 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, | |||
918 | * We want to add any pinned extents to our free space cache | 912 | * We want to add any pinned extents to our free space cache |
919 | * so we don't leak the space | 913 | * so we don't leak the space |
920 | */ | 914 | */ |
915 | |||
916 | /* | ||
917 | * We shouldn't have switched the pinned extents yet so this is the | ||
918 | * right one | ||
919 | */ | ||
920 | unpin = root->fs_info->pinned_extents; | ||
921 | |||
922 | if (block_group) | ||
923 | start = block_group->key.objectid; | ||
924 | |||
921 | while (block_group && (start < block_group->key.objectid + | 925 | while (block_group && (start < block_group->key.objectid + |
922 | block_group->key.offset)) { | 926 | block_group->key.offset)) { |
923 | ret = find_first_extent_bit(unpin, start, &start, &end, | 927 | ret = find_first_extent_bit(unpin, start, |
928 | &extent_start, &extent_end, | ||
924 | EXTENT_DIRTY); | 929 | EXTENT_DIRTY); |
925 | if (ret) { | 930 | if (ret) { |
926 | ret = 0; | 931 | ret = 0; |
@@ -928,20 +933,21 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, | |||
928 | } | 933 | } |
929 | 934 | ||
930 | /* This pinned extent is out of our range */ | 935 | /* This pinned extent is out of our range */ |
931 | if (start >= block_group->key.objectid + | 936 | if (extent_start >= block_group->key.objectid + |
932 | block_group->key.offset) | 937 | block_group->key.offset) |
933 | break; | 938 | break; |
934 | 939 | ||
935 | len = block_group->key.objectid + | 940 | extent_start = max(extent_start, start); |
936 | block_group->key.offset - start; | 941 | extent_end = min(block_group->key.objectid + |
937 | len = min(len, end + 1 - start); | 942 | block_group->key.offset, extent_end + 1); |
943 | len = extent_end - extent_start; | ||
938 | 944 | ||
939 | entries++; | 945 | entries++; |
940 | ret = io_ctl_add_entry(&io_ctl, start, len, NULL); | 946 | ret = io_ctl_add_entry(&io_ctl, extent_start, len, NULL); |
941 | if (ret) | 947 | if (ret) |
942 | goto out_nospc; | 948 | goto out_nospc; |
943 | 949 | ||
944 | start = end + 1; | 950 | start = extent_end; |
945 | } | 951 | } |
946 | 952 | ||
947 | /* Write out the bitmaps */ | 953 | /* Write out the bitmaps */ |
@@ -2574,17 +2580,57 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) | |||
2574 | cluster->block_group = NULL; | 2580 | cluster->block_group = NULL; |
2575 | } | 2581 | } |
2576 | 2582 | ||
2577 | int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, | 2583 | static int do_trimming(struct btrfs_block_group_cache *block_group, |
2578 | u64 *trimmed, u64 start, u64 end, u64 minlen) | 2584 | u64 *total_trimmed, u64 start, u64 bytes, |
2585 | u64 reserved_start, u64 reserved_bytes) | ||
2579 | { | 2586 | { |
2580 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | 2587 | struct btrfs_space_info *space_info = block_group->space_info; |
2581 | struct btrfs_free_space *entry = NULL; | ||
2582 | struct btrfs_fs_info *fs_info = block_group->fs_info; | 2588 | struct btrfs_fs_info *fs_info = block_group->fs_info; |
2583 | u64 bytes = 0; | 2589 | int ret; |
2584 | u64 actually_trimmed; | 2590 | int update = 0; |
2585 | int ret = 0; | 2591 | u64 trimmed = 0; |
2586 | 2592 | ||
2587 | *trimmed = 0; | 2593 | spin_lock(&space_info->lock); |
2594 | spin_lock(&block_group->lock); | ||
2595 | if (!block_group->ro) { | ||
2596 | block_group->reserved += reserved_bytes; | ||
2597 | space_info->bytes_reserved += reserved_bytes; | ||
2598 | update = 1; | ||
2599 | } | ||
2600 | spin_unlock(&block_group->lock); | ||
2601 | spin_unlock(&space_info->lock); | ||
2602 | |||
2603 | ret = btrfs_error_discard_extent(fs_info->extent_root, | ||
2604 | start, bytes, &trimmed); | ||
2605 | if (!ret) | ||
2606 | *total_trimmed += trimmed; | ||
2607 | |||
2608 | btrfs_add_free_space(block_group, reserved_start, reserved_bytes); | ||
2609 | |||
2610 | if (update) { | ||
2611 | spin_lock(&space_info->lock); | ||
2612 | spin_lock(&block_group->lock); | ||
2613 | if (block_group->ro) | ||
2614 | space_info->bytes_readonly += reserved_bytes; | ||
2615 | block_group->reserved -= reserved_bytes; | ||
2616 | space_info->bytes_reserved -= reserved_bytes; | ||
2617 | spin_unlock(&space_info->lock); | ||
2618 | spin_unlock(&block_group->lock); | ||
2619 | } | ||
2620 | |||
2621 | return ret; | ||
2622 | } | ||
2623 | |||
2624 | static int trim_no_bitmap(struct btrfs_block_group_cache *block_group, | ||
2625 | u64 *total_trimmed, u64 start, u64 end, u64 minlen) | ||
2626 | { | ||
2627 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
2628 | struct btrfs_free_space *entry; | ||
2629 | struct rb_node *node; | ||
2630 | int ret = 0; | ||
2631 | u64 extent_start; | ||
2632 | u64 extent_bytes; | ||
2633 | u64 bytes; | ||
2588 | 2634 | ||
2589 | while (start < end) { | 2635 | while (start < end) { |
2590 | spin_lock(&ctl->tree_lock); | 2636 | spin_lock(&ctl->tree_lock); |
@@ -2595,81 +2641,118 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, | |||
2595 | } | 2641 | } |
2596 | 2642 | ||
2597 | entry = tree_search_offset(ctl, start, 0, 1); | 2643 | entry = tree_search_offset(ctl, start, 0, 1); |
2598 | if (!entry) | 2644 | if (!entry) { |
2599 | entry = tree_search_offset(ctl, | ||
2600 | offset_to_bitmap(ctl, start), | ||
2601 | 1, 1); | ||
2602 | |||
2603 | if (!entry || entry->offset >= end) { | ||
2604 | spin_unlock(&ctl->tree_lock); | 2645 | spin_unlock(&ctl->tree_lock); |
2605 | break; | 2646 | break; |
2606 | } | 2647 | } |
2607 | 2648 | ||
2608 | if (entry->bitmap) { | 2649 | /* skip bitmaps */ |
2609 | ret = search_bitmap(ctl, entry, &start, &bytes); | 2650 | while (entry->bitmap) { |
2610 | if (!ret) { | 2651 | node = rb_next(&entry->offset_index); |
2611 | if (start >= end) { | 2652 | if (!node) { |
2612 | spin_unlock(&ctl->tree_lock); | ||
2613 | break; | ||
2614 | } | ||
2615 | bytes = min(bytes, end - start); | ||
2616 | bitmap_clear_bits(ctl, entry, start, bytes); | ||
2617 | if (entry->bytes == 0) | ||
2618 | free_bitmap(ctl, entry); | ||
2619 | } else { | ||
2620 | start = entry->offset + BITS_PER_BITMAP * | ||
2621 | block_group->sectorsize; | ||
2622 | spin_unlock(&ctl->tree_lock); | 2653 | spin_unlock(&ctl->tree_lock); |
2623 | ret = 0; | 2654 | goto out; |
2624 | continue; | ||
2625 | } | 2655 | } |
2626 | } else { | 2656 | entry = rb_entry(node, struct btrfs_free_space, |
2627 | start = entry->offset; | 2657 | offset_index); |
2628 | bytes = min(entry->bytes, end - start); | 2658 | } |
2629 | unlink_free_space(ctl, entry); | 2659 | |
2630 | kmem_cache_free(btrfs_free_space_cachep, entry); | 2660 | if (entry->offset >= end) { |
2661 | spin_unlock(&ctl->tree_lock); | ||
2662 | break; | ||
2631 | } | 2663 | } |
2632 | 2664 | ||
2665 | extent_start = entry->offset; | ||
2666 | extent_bytes = entry->bytes; | ||
2667 | start = max(start, extent_start); | ||
2668 | bytes = min(extent_start + extent_bytes, end) - start; | ||
2669 | if (bytes < minlen) { | ||
2670 | spin_unlock(&ctl->tree_lock); | ||
2671 | goto next; | ||
2672 | } | ||
2673 | |||
2674 | unlink_free_space(ctl, entry); | ||
2675 | kmem_cache_free(btrfs_free_space_cachep, entry); | ||
2676 | |||
2633 | spin_unlock(&ctl->tree_lock); | 2677 | spin_unlock(&ctl->tree_lock); |
2634 | 2678 | ||
2635 | if (bytes >= minlen) { | 2679 | ret = do_trimming(block_group, total_trimmed, start, bytes, |
2636 | struct btrfs_space_info *space_info; | 2680 | extent_start, extent_bytes); |
2637 | int update = 0; | 2681 | if (ret) |
2638 | 2682 | break; | |
2639 | space_info = block_group->space_info; | 2683 | next: |
2640 | spin_lock(&space_info->lock); | 2684 | start += bytes; |
2641 | spin_lock(&block_group->lock); | ||
2642 | if (!block_group->ro) { | ||
2643 | block_group->reserved += bytes; | ||
2644 | space_info->bytes_reserved += bytes; | ||
2645 | update = 1; | ||
2646 | } | ||
2647 | spin_unlock(&block_group->lock); | ||
2648 | spin_unlock(&space_info->lock); | ||
2649 | |||
2650 | ret = btrfs_error_discard_extent(fs_info->extent_root, | ||
2651 | start, | ||
2652 | bytes, | ||
2653 | &actually_trimmed); | ||
2654 | |||
2655 | btrfs_add_free_space(block_group, start, bytes); | ||
2656 | if (update) { | ||
2657 | spin_lock(&space_info->lock); | ||
2658 | spin_lock(&block_group->lock); | ||
2659 | if (block_group->ro) | ||
2660 | space_info->bytes_readonly += bytes; | ||
2661 | block_group->reserved -= bytes; | ||
2662 | space_info->bytes_reserved -= bytes; | ||
2663 | spin_unlock(&space_info->lock); | ||
2664 | spin_unlock(&block_group->lock); | ||
2665 | } | ||
2666 | 2685 | ||
2667 | if (ret) | 2686 | if (fatal_signal_pending(current)) { |
2668 | break; | 2687 | ret = -ERESTARTSYS; |
2669 | *trimmed += actually_trimmed; | 2688 | break; |
2689 | } | ||
2690 | |||
2691 | cond_resched(); | ||
2692 | } | ||
2693 | out: | ||
2694 | return ret; | ||
2695 | } | ||
2696 | |||
2697 | static int trim_bitmaps(struct btrfs_block_group_cache *block_group, | ||
2698 | u64 *total_trimmed, u64 start, u64 end, u64 minlen) | ||
2699 | { | ||
2700 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
2701 | struct btrfs_free_space *entry; | ||
2702 | int ret = 0; | ||
2703 | int ret2; | ||
2704 | u64 bytes; | ||
2705 | u64 offset = offset_to_bitmap(ctl, start); | ||
2706 | |||
2707 | while (offset < end) { | ||
2708 | bool next_bitmap = false; | ||
2709 | |||
2710 | spin_lock(&ctl->tree_lock); | ||
2711 | |||
2712 | if (ctl->free_space < minlen) { | ||
2713 | spin_unlock(&ctl->tree_lock); | ||
2714 | break; | ||
2715 | } | ||
2716 | |||
2717 | entry = tree_search_offset(ctl, offset, 1, 0); | ||
2718 | if (!entry) { | ||
2719 | spin_unlock(&ctl->tree_lock); | ||
2720 | next_bitmap = true; | ||
2721 | goto next; | ||
2722 | } | ||
2723 | |||
2724 | bytes = minlen; | ||
2725 | ret2 = search_bitmap(ctl, entry, &start, &bytes); | ||
2726 | if (ret2 || start >= end) { | ||
2727 | spin_unlock(&ctl->tree_lock); | ||
2728 | next_bitmap = true; | ||
2729 | goto next; | ||
2730 | } | ||
2731 | |||
2732 | bytes = min(bytes, end - start); | ||
2733 | if (bytes < minlen) { | ||
2734 | spin_unlock(&ctl->tree_lock); | ||
2735 | goto next; | ||
2736 | } | ||
2737 | |||
2738 | bitmap_clear_bits(ctl, entry, start, bytes); | ||
2739 | if (entry->bytes == 0) | ||
2740 | free_bitmap(ctl, entry); | ||
2741 | |||
2742 | spin_unlock(&ctl->tree_lock); | ||
2743 | |||
2744 | ret = do_trimming(block_group, total_trimmed, start, bytes, | ||
2745 | start, bytes); | ||
2746 | if (ret) | ||
2747 | break; | ||
2748 | next: | ||
2749 | if (next_bitmap) { | ||
2750 | offset += BITS_PER_BITMAP * ctl->unit; | ||
2751 | } else { | ||
2752 | start += bytes; | ||
2753 | if (start >= offset + BITS_PER_BITMAP * ctl->unit) | ||
2754 | offset += BITS_PER_BITMAP * ctl->unit; | ||
2670 | } | 2755 | } |
2671 | start += bytes; | ||
2672 | bytes = 0; | ||
2673 | 2756 | ||
2674 | if (fatal_signal_pending(current)) { | 2757 | if (fatal_signal_pending(current)) { |
2675 | ret = -ERESTARTSYS; | 2758 | ret = -ERESTARTSYS; |
@@ -2682,6 +2765,22 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, | |||
2682 | return ret; | 2765 | return ret; |
2683 | } | 2766 | } |
2684 | 2767 | ||
2768 | int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, | ||
2769 | u64 *trimmed, u64 start, u64 end, u64 minlen) | ||
2770 | { | ||
2771 | int ret; | ||
2772 | |||
2773 | *trimmed = 0; | ||
2774 | |||
2775 | ret = trim_no_bitmap(block_group, trimmed, start, end, minlen); | ||
2776 | if (ret) | ||
2777 | return ret; | ||
2778 | |||
2779 | ret = trim_bitmaps(block_group, trimmed, start, end, minlen); | ||
2780 | |||
2781 | return ret; | ||
2782 | } | ||
2783 | |||
2685 | /* | 2784 | /* |
2686 | * Find the left-most item in the cache tree, and then return the | 2785 | * Find the left-most item in the cache tree, and then return the |
2687 | * smallest inode number in the item. | 2786 | * smallest inode number in the item. |