author		Josef Bacik <jbacik@redhat.com>		2009-02-20 10:59:53 -0500
committer	Chris Mason <chris.mason@oracle.com>	2009-02-20 10:59:53 -0500
commit		4e06bdd6cbd5105376e7caf4e683ed131e777389 (patch)
tree		20a7b891f7f8f518eb2cd81234e0c9ab5902c6c3 /fs/btrfs/extent-tree.c
parent		6a63209fc02d5483371f07e4913ee8abad608051 (diff)
Btrfs: try committing transaction before returning ENOSPC
This fixes a problem where we could return -ENOSPC when we may actually have
plenty of space, the space is just pinned.  Instead of returning -ENOSPC
immediately, commit the transaction first and then try and do the allocation
again.

This patch also does chunk allocation for metadata if we pass the 80%
threshold for metadata space.  This will help with stack usage since the
chunk allocation will happen early on, instead of when the allocation is
happening.

Signed-off-by: Josef Bacik <jbacik@redhat.com>
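As a quick orientation before the diff itself, the sketch below is a small, self-contained model of the retry flow the patch adds: check the usage threshold, force a chunk allocation while the space can still grow, commit the transaction once to unpin space, and only then give up with ENOSPC. The struct space_model type, its fields, and the helper name check_metadata_space() are invented for illustration only; the real logic operates on struct btrfs_space_info under its spinlock, as shown in the hunks below.

/*
 * Toy, compilable model of the retry logic this patch introduces -- not
 * kernel code.  struct space_model and its fields are invented for
 * illustration; the real accounting lives in struct btrfs_space_info
 * and is done under meta_sinfo->lock in the hunks below.
 */
#include <stdint.h>
#include <stdio.h>

struct space_model {
	uint64_t total_bytes;	/* size of the metadata space */
	uint64_t used_bytes;	/* used + reserved + pinned + readonly */
	uint64_t pinned_bytes;	/* portion a commit would release */
	int full;		/* no room on disk for another chunk */
};

static int check_metadata_space(struct space_model *s)
{
	int committed = 0;
	uint64_t thresh;

again:
	/* 80% threshold while we can still grow, 95% once the space is full */
	thresh = s->total_bytes * (s->full ? 95 : 80) / 100;

	if (s->used_bytes <= thresh)
		return 0;

	if (!s->full) {
		/* stands in for do_chunk_alloc(): grow the space early, re-check */
		s->total_bytes += 2 * 1024 * 1024;
		goto again;
	}

	if (!committed) {
		/* stands in for btrfs_commit_transaction(): unpin space, retry once */
		committed = 1;
		s->used_bytes -= s->pinned_bytes;
		s->pinned_bytes = 0;
		goto again;
	}

	return -1;	/* the real code returns -ENOSPC here */
}

int main(void)
{
	/* 96% used, with 10 units pinned: the commit path frees enough to succeed */
	struct space_model s = { 100, 96, 10, 1 };
	printf("check_metadata_space: %d\n", check_metadata_space(&s));
	return 0;
}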
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--	fs/btrfs/extent-tree.c	57
1 file changed, 47 insertions(+), 10 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index e11875e97c2f..6b5966aacf44 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2017,26 +2017,49 @@ int btrfs_check_metadata_free_space(struct btrfs_root *root)
 	struct btrfs_fs_info *info = root->fs_info;
 	struct btrfs_space_info *meta_sinfo;
 	u64 alloc_target, thresh;
+	int committed = 0, ret;
 
 	/* get the space info for where the metadata will live */
 	alloc_target = btrfs_get_alloc_profile(root, 0);
 	meta_sinfo = __find_space_info(info, alloc_target);
 
-	/*
-	 * if the metadata area isn't maxed out then there is no sense in
-	 * checking how much is used, since we can always allocate a new chunk
-	 */
-	if (!meta_sinfo->full)
-		return 0;
-
+again:
 	spin_lock(&meta_sinfo->lock);
-	thresh = meta_sinfo->total_bytes * 95;
+	if (!meta_sinfo->full)
+		thresh = meta_sinfo->total_bytes * 80;
+	else
+		thresh = meta_sinfo->total_bytes * 95;
 
 	do_div(thresh, 100);
 
 	if (meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
 	    meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly > thresh) {
+		struct btrfs_trans_handle *trans;
+		if (!meta_sinfo->full) {
+			meta_sinfo->force_alloc = 1;
+			spin_unlock(&meta_sinfo->lock);
+
+			trans = btrfs_start_transaction(root, 1);
+			if (!trans)
+				return -ENOMEM;
+
+			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
+					     2 * 1024 * 1024, alloc_target, 0);
+			btrfs_end_transaction(trans, root);
+			goto again;
+		}
 		spin_unlock(&meta_sinfo->lock);
+
+		if (!committed) {
+			committed = 1;
+			trans = btrfs_join_transaction(root, 1);
+			if (!trans)
+				return -ENOMEM;
+			ret = btrfs_commit_transaction(trans, root);
+			if (ret)
+				return ret;
+			goto again;
+		}
 		return -ENOSPC;
 	}
 	spin_unlock(&meta_sinfo->lock);
@@ -2052,7 +2075,7 @@ int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
 				u64 bytes)
 {
 	struct btrfs_space_info *data_sinfo;
-	int ret = 0;
+	int ret = 0, committed = 0;
 
 	/* make sure bytes are sectorsize aligned */
 	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
@@ -2065,13 +2088,14 @@ again:
 	    data_sinfo->bytes_delalloc - data_sinfo->bytes_reserved -
 	    data_sinfo->bytes_pinned - data_sinfo->bytes_readonly -
 	    data_sinfo->bytes_may_use < bytes) {
+		struct btrfs_trans_handle *trans;
+
 		/*
 		 * if we don't have enough free bytes in this space then we need
 		 * to alloc a new chunk.
 		 */
 		if (!data_sinfo->full) {
 			u64 alloc_target;
-			struct btrfs_trans_handle *trans;
 
 			data_sinfo->force_alloc = 1;
 			spin_unlock(&data_sinfo->lock);
@@ -2090,6 +2114,19 @@ again:
 			goto again;
 		}
 		spin_unlock(&data_sinfo->lock);
+
+		/* commit the current transaction and try again */
+		if (!committed) {
+			committed = 1;
+			trans = btrfs_join_transaction(root, 1);
+			if (!trans)
+				return -ENOMEM;
+			ret = btrfs_commit_transaction(trans, root);
+			if (ret)
+				return ret;
+			goto again;
+		}
+
 		printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes"
 		       ", %llu bytes_used, %llu bytes_reserved, "
 		       "%llu bytes_pinned, %llu bytes_readonly, %llu may use"