Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--   fs/btrfs/extent-tree.c   100
1 file changed, 59 insertions, 41 deletions
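A recurring change in the hunks below is widening the (u64) casts of pointers passed to
trace_btrfs_space_reservation() with an intermediate (unsigned long), which avoids
pointer-to-integer size warnings on 32-bit builds. The other notable theme is the
reordering of the CHUNK_ALLOC_* levels together with switching do_chunk_alloc() from
"if (space_info->force_alloc)" to "if (force < space_info->force_alloc)". A minimal
sketch of that ordering idea follows (not part of the patch; effective_force() is a
hypothetical helper shown only for illustration):

/*
 * With the reordered values, "how hard to try" is encoded in the numeric
 * order, so the stronger of the caller's level and the pending level
 * recorded in space_info->force_alloc can be chosen with a plain compare,
 * which is what the new "force < space_info->force_alloc" test does.
 */
enum {
	CHUNK_ALLOC_NO_FORCE = 0,	/* only allocate if really needed */
	CHUNK_ALLOC_LIMITED = 1,	/* allocate if very few chunks exist */
	CHUNK_ALLOC_FORCE = 2,		/* must try to allocate one */
};

int effective_force(int caller_force, int pending_force)
{
	/* a stronger pending request upgrades the caller, never downgrades it */
	return caller_force < pending_force ? pending_force : caller_force;
}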
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 700879ed64cf..37e0a800d34e 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -34,23 +34,24 @@
 #include "locking.h"
 #include "free-space-cache.h"
 
-/* control flags for do_chunk_alloc's force field
+/*
+ * control flags for do_chunk_alloc's force field
  * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
  * if we really need one.
  *
- * CHUNK_ALLOC_FORCE means it must try to allocate one
- *
  * CHUNK_ALLOC_LIMITED means to only try and allocate one
  * if we have very few chunks already allocated. This is
  * used as part of the clustering code to help make sure
  * we have a good pool of storage to cluster in, without
  * filling the FS with empty chunks
  *
+ * CHUNK_ALLOC_FORCE means it must try to allocate one
+ *
  */
 enum {
 	CHUNK_ALLOC_NO_FORCE = 0,
-	CHUNK_ALLOC_FORCE = 1,
-	CHUNK_ALLOC_LIMITED = 2,
+	CHUNK_ALLOC_LIMITED = 1,
+	CHUNK_ALLOC_FORCE = 2,
 };
 
 /*
@@ -3311,7 +3312,8 @@ commit_trans:
 	}
 	data_sinfo->bytes_may_use += bytes;
 	trace_btrfs_space_reservation(root->fs_info, "space_info",
-				      (u64)data_sinfo, bytes, 1);
+				      (u64)(unsigned long)data_sinfo,
+				      bytes, 1);
 	spin_unlock(&data_sinfo->lock);
 
 	return 0;
@@ -3332,7 +3334,8 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
 	spin_lock(&data_sinfo->lock);
 	data_sinfo->bytes_may_use -= bytes;
 	trace_btrfs_space_reservation(root->fs_info, "space_info",
-				      (u64)data_sinfo, bytes, 0);
+				      (u64)(unsigned long)data_sinfo,
+				      bytes, 0);
 	spin_unlock(&data_sinfo->lock);
 }
 
@@ -3414,7 +3417,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 
 again:
 	spin_lock(&space_info->lock);
-	if (space_info->force_alloc)
+	if (force < space_info->force_alloc)
 		force = space_info->force_alloc;
 	if (space_info->full) {
 		spin_unlock(&space_info->lock);
@@ -3610,12 +3613,15 @@ static int may_commit_transaction(struct btrfs_root *root,
 	if (space_info != delayed_rsv->space_info)
 		return -ENOSPC;
 
+	spin_lock(&space_info->lock);
 	spin_lock(&delayed_rsv->lock);
-	if (delayed_rsv->size < bytes) {
+	if (space_info->bytes_pinned + delayed_rsv->size < bytes) {
 		spin_unlock(&delayed_rsv->lock);
+		spin_unlock(&space_info->lock);
 		return -ENOSPC;
 	}
 	spin_unlock(&delayed_rsv->lock);
+	spin_unlock(&space_info->lock);
 
 commit:
 	trans = btrfs_join_transaction(root);
@@ -3694,9 +3700,9 @@ again:
 	if (used + orig_bytes <= space_info->total_bytes) {
 		space_info->bytes_may_use += orig_bytes;
 		trace_btrfs_space_reservation(root->fs_info,
 					      "space_info",
-					      (u64)space_info,
+					      (u64)(unsigned long)space_info,
 					      orig_bytes, 1);
 		ret = 0;
 	} else {
 		/*
@@ -3765,9 +3771,9 @@ again:
 	if (used + num_bytes < space_info->total_bytes + avail) {
 		space_info->bytes_may_use += orig_bytes;
 		trace_btrfs_space_reservation(root->fs_info,
 					      "space_info",
-					      (u64)space_info,
+					      (u64)(unsigned long)space_info,
 					      orig_bytes, 1);
 		ret = 0;
 	} else {
 		wait_ordered = true;
@@ -3912,8 +3918,8 @@ static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
 		spin_lock(&space_info->lock);
 		space_info->bytes_may_use -= num_bytes;
 		trace_btrfs_space_reservation(fs_info, "space_info",
-					      (u64)space_info,
+					      (u64)(unsigned long)space_info,
 					      num_bytes, 0);
 		space_info->reservation_progress++;
 		spin_unlock(&space_info->lock);
 	}
@@ -4104,7 +4110,7 @@ static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
 	num_bytes += div64_u64(data_used + meta_used, 50);
 
 	if (num_bytes * 3 > meta_used)
-		num_bytes = div64_u64(meta_used, 3);
+		num_bytes = div64_u64(meta_used, 3) * 2;
 
 	return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
 }
@@ -4131,14 +4137,14 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
 		block_rsv->reserved += num_bytes;
 		sinfo->bytes_may_use += num_bytes;
 		trace_btrfs_space_reservation(fs_info, "space_info",
-				      (u64)sinfo, num_bytes, 1);
+				      (u64)(unsigned long)sinfo, num_bytes, 1);
 	}
 
 	if (block_rsv->reserved >= block_rsv->size) {
 		num_bytes = block_rsv->reserved - block_rsv->size;
 		sinfo->bytes_may_use -= num_bytes;
 		trace_btrfs_space_reservation(fs_info, "space_info",
-				      (u64)sinfo, num_bytes, 0);
+				      (u64)(unsigned long)sinfo, num_bytes, 0);
 		sinfo->reservation_progress++;
 		block_rsv->reserved = block_rsv->size;
 		block_rsv->full = 1;
@@ -4191,7 +4197,8 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
 	if (!trans->bytes_reserved)
 		return;
 
-	trace_btrfs_space_reservation(root->fs_info, "transaction", (u64)trans,
+	trace_btrfs_space_reservation(root->fs_info, "transaction",
+				      (u64)(unsigned long)trans,
 				      trans->bytes_reserved, 0);
 	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
 	trans->bytes_reserved = 0;
@@ -4709,9 +4716,9 @@ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
 			space_info->bytes_reserved += num_bytes;
 			if (reserve == RESERVE_ALLOC) {
 				trace_btrfs_space_reservation(cache->fs_info,
 							      "space_info",
-							      (u64)space_info,
+							      (u64)(unsigned long)space_info,
 							      num_bytes, 0);
 				space_info->bytes_may_use -= num_bytes;
 			}
 		}
@@ -5794,6 +5801,7 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
 			 u64 search_end, struct btrfs_key *ins,
 			 u64 data)
 {
+	bool final_tried = false;
 	int ret;
 	u64 search_start = 0;
 
@@ -5813,22 +5821,25 @@ again:
 			       search_start, search_end, hint_byte,
 			       ins, data);
 
-	if (ret == -ENOSPC && num_bytes > min_alloc_size) {
-		num_bytes = num_bytes >> 1;
-		num_bytes = num_bytes & ~(root->sectorsize - 1);
-		num_bytes = max(num_bytes, min_alloc_size);
-		do_chunk_alloc(trans, root->fs_info->extent_root,
-			       num_bytes, data, CHUNK_ALLOC_FORCE);
-		goto again;
-	}
-	if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
-		struct btrfs_space_info *sinfo;
-
-		sinfo = __find_space_info(root->fs_info, data);
-		printk(KERN_ERR "btrfs allocation failed flags %llu, "
-		       "wanted %llu\n", (unsigned long long)data,
-		       (unsigned long long)num_bytes);
-		dump_space_info(sinfo, num_bytes, 1);
+	if (ret == -ENOSPC) {
+		if (!final_tried) {
+			num_bytes = num_bytes >> 1;
+			num_bytes = num_bytes & ~(root->sectorsize - 1);
+			num_bytes = max(num_bytes, min_alloc_size);
+			do_chunk_alloc(trans, root->fs_info->extent_root,
+				       num_bytes, data, CHUNK_ALLOC_FORCE);
+			if (num_bytes == min_alloc_size)
+				final_tried = true;
+			goto again;
+		} else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
+			struct btrfs_space_info *sinfo;
+
+			sinfo = __find_space_info(root->fs_info, data);
+			printk(KERN_ERR "btrfs allocation failed flags %llu, "
+			       "wanted %llu\n", (unsigned long long)data,
+			       (unsigned long long)num_bytes);
+			dump_space_info(sinfo, num_bytes, 1);
+		}
 	}
 
 	trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
@@ -7881,9 +7892,16 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
 	u64 start;
 	u64 end;
 	u64 trimmed = 0;
+	u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
 	int ret = 0;
 
-	cache = btrfs_lookup_block_group(fs_info, range->start);
+	/*
+	 * try to trim all FS space, our block group may start from non-zero.
+	 */
+	if (range->len == total_bytes)
+		cache = btrfs_lookup_first_block_group(fs_info, range->start);
+	else
+		cache = btrfs_lookup_block_group(fs_info, range->start);
 
 	while (cache) {
 		if (cache->key.objectid >= (range->start + range->len)) {