aboutsummaryrefslogtreecommitdiffstats
path: root/fs/btrfs
diff options
context:
space:
mode:
authorDavid Sterba <dsterba@suse.com>2016-04-26 20:41:17 -0400
committerDavid Sterba <dsterba@suse.com>2016-05-10 03:46:13 -0400
commite721e49dd1681d45d71919f0561f5e978a34153c (patch)
treef74acacc029a10df8ba68cf1e2c0d40564a6949e /fs/btrfs
parentf77dd0d6b2f0f2cf290cacbd48f5eee18586e52b (diff)
btrfs: make find_workspace always succeed
With just one preallocated workspace we can guarantee forward progress even if there's no memory available for new workspaces. The cost is more waiting but we also get rid of several error paths. On average, there will be several idle workspaces, so the waiting penalty won't be so bad. In the worst case, all cpus will compete for one workspace until there's some memory. Attempts to allocate a new one are done each time the waiters are woken up. Signed-off-by: David Sterba <dsterba@suse.com>
Diffstat (limited to 'fs/btrfs')
-rw-r--r--fs/btrfs/compression.c20
1 file changed, 12 insertions, 8 deletions
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 38c058bcf359..c70625560265 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -785,8 +785,10 @@ void __init btrfs_init_compress(void)
785} 785}
786 786
787/* 787/*
788 * this finds an available workspace or allocates a new one 788 * This finds an available workspace or allocates a new one.
789 * ERR_PTR is returned if things go bad. 789 * If it's not possible to allocate a new one, waits until there's one.
790 * Preallocation provides forward progress guarantees and we do not return
791 * errors.
790 */ 792 */
791static struct list_head *find_workspace(int type) 793static struct list_head *find_workspace(int type)
792{ 794{
@@ -826,6 +828,14 @@ again:
826 if (IS_ERR(workspace)) { 828 if (IS_ERR(workspace)) {
827 atomic_dec(total_ws); 829 atomic_dec(total_ws);
828 wake_up(ws_wait); 830 wake_up(ws_wait);
831
832 /*
833 * Do not return the error but go back to waiting. There's a
834 * workspace preallocated for each type and the compression
835 * time is bounded so we get to a workspace eventually. This
836 * makes our caller's life easier.
837 */
838 goto again;
829 } 839 }
830 return workspace; 840 return workspace;
831} 841}
@@ -913,8 +923,6 @@ int btrfs_compress_pages(int type, struct address_space *mapping,
913 int ret; 923 int ret;
914 924
915 workspace = find_workspace(type); 925 workspace = find_workspace(type);
916 if (IS_ERR(workspace))
917 return PTR_ERR(workspace);
918 926
919 ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping, 927 ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
920 start, len, pages, 928 start, len, pages,
@@ -949,8 +957,6 @@ static int btrfs_decompress_biovec(int type, struct page **pages_in,
949 int ret; 957 int ret;
950 958
951 workspace = find_workspace(type); 959 workspace = find_workspace(type);
952 if (IS_ERR(workspace))
953 return PTR_ERR(workspace);
954 960
955 ret = btrfs_compress_op[type-1]->decompress_biovec(workspace, pages_in, 961 ret = btrfs_compress_op[type-1]->decompress_biovec(workspace, pages_in,
956 disk_start, 962 disk_start,
@@ -971,8 +977,6 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
971 int ret; 977 int ret;
972 978
973 workspace = find_workspace(type); 979 workspace = find_workspace(type);
974 if (IS_ERR(workspace))
975 return PTR_ERR(workspace);
976 980
977 ret = btrfs_compress_op[type-1]->decompress(workspace, data_in, 981 ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
978 dest_page, start_byte, 982 dest_page, start_byte,