author		Dennis Zhou <dennis@kernel.org>	2019-02-04 15:19:59 -0500
committer	David Sterba <dsterba@suse.com>	2019-02-25 08:13:31 -0500
commit		ca4ac360af94964906149efe166453ac83ae7c43 (patch)
tree		7f236453654db2b790e84bbf978260416a1f01bc
parent		acce85de12e68baaef77719685dd8d026a94e7dc (diff)
btrfs: manage heuristic workspace as index 0
While the heuristic workspaces aren't really compression workspaces, they
use the same interface for managing them. So rather than branching, let's
just handle them once again as the index 0 compression type.

Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Dennis Zhou <dennis@kernel.org>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
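For orientation, a minimal standalone userspace sketch of the pattern this
patch adopts (illustrative names only, not the btrfs API): the heuristic's
ops live at index 0 of the same ops table as the real compression types, so
callers index the table directly by type instead of branching on a
heuristic flag.

#include <stdio.h>

struct compress_op {
	const char *name;
	void (*alloc_workspace)(void);
};

static void alloc_heuristic(void) { puts("alloc heuristic workspace"); }
static void alloc_zlib(void)      { puts("alloc zlib workspace"); }
static void alloc_lzo(void)       { puts("alloc lzo workspace"); }

/* index 0 is the heuristic; real compression types follow at 1..N */
static const struct compress_op ops[] = {
	{ "heuristic", alloc_heuristic },
	{ "zlib",      alloc_zlib      },
	{ "lzo",       alloc_lzo       },
};

int main(void)
{
	/* one unbranched code path for every type, heuristic included */
	for (int type = 0; type < 3; type++)
		ops[type].alloc_workspace();
	return 0;
}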
-rw-r--r--	fs/btrfs/compression.c	112
-rw-r--r--	fs/btrfs/compression.h	4
2 files changed, 34 insertions(+), 82 deletions(-)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index d098df768b67..7034cf2749e6 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -769,6 +769,11 @@ fail:
 	return ERR_PTR(-ENOMEM);
 }
 
+const struct btrfs_compress_op btrfs_heuristic_compress = {
+	.alloc_workspace = alloc_heuristic_ws,
+	.free_workspace = free_heuristic_ws,
+};
+
 struct workspace_manager {
 	struct list_head idle_ws;
 	spinlock_t ws_lock;
@@ -780,11 +785,11 @@ struct workspace_manager {
 	wait_queue_head_t ws_wait;
 };
 
-static struct workspace_manager wsm[BTRFS_COMPRESS_TYPES];
-
-static struct workspace_manager btrfs_heuristic_ws;
+static struct workspace_manager wsm[BTRFS_NR_WORKSPACE_MANAGERS];
 
 static const struct btrfs_compress_op * const btrfs_compress_op[] = {
+	/* The heuristic is represented as compression type 0 */
+	&btrfs_heuristic_compress,
 	&btrfs_zlib_compress,
 	&btrfs_lzo_compress,
 	&btrfs_zstd_compress,
@@ -795,22 +800,7 @@ void __init btrfs_init_compress(void)
 	struct list_head *workspace;
 	int i;
 
-	INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws);
-	spin_lock_init(&btrfs_heuristic_ws.ws_lock);
-	atomic_set(&btrfs_heuristic_ws.total_ws, 0);
-	init_waitqueue_head(&btrfs_heuristic_ws.ws_wait);
-
-	workspace = alloc_heuristic_ws();
-	if (IS_ERR(workspace)) {
-		pr_warn(
-	"BTRFS: cannot preallocate heuristic workspace, will try later\n");
-	} else {
-		atomic_set(&btrfs_heuristic_ws.total_ws, 1);
-		btrfs_heuristic_ws.free_ws = 1;
-		list_add(workspace, &btrfs_heuristic_ws.idle_ws);
-	}
-
-	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
+	for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++) {
 		INIT_LIST_HEAD(&wsm[i].idle_ws);
 		spin_lock_init(&wsm[i].ws_lock);
 		atomic_set(&wsm[i].total_ws, 0);
@@ -837,11 +827,10 @@ void __init btrfs_init_compress(void)
  * Preallocation makes a forward progress guarantees and we do not return
  * errors.
  */
-static struct list_head *__find_workspace(int type, bool heuristic)
+static struct list_head *find_workspace(int type)
 {
 	struct list_head *workspace;
 	int cpus = num_online_cpus();
-	int idx = type - 1;
 	unsigned nofs_flag;
 	struct list_head *idle_ws;
 	spinlock_t *ws_lock;
@@ -849,19 +838,11 @@ static struct list_head *__find_workspace(int type, bool heuristic)
 	wait_queue_head_t *ws_wait;
 	int *free_ws;
 
-	if (heuristic) {
-		idle_ws = &btrfs_heuristic_ws.idle_ws;
-		ws_lock = &btrfs_heuristic_ws.ws_lock;
-		total_ws = &btrfs_heuristic_ws.total_ws;
-		ws_wait = &btrfs_heuristic_ws.ws_wait;
-		free_ws = &btrfs_heuristic_ws.free_ws;
-	} else {
-		idle_ws = &wsm[idx].idle_ws;
-		ws_lock = &wsm[idx].ws_lock;
-		total_ws = &wsm[idx].total_ws;
-		ws_wait = &wsm[idx].ws_wait;
-		free_ws = &wsm[idx].free_ws;
-	}
+	idle_ws = &wsm[type].idle_ws;
+	ws_lock = &wsm[type].ws_lock;
+	total_ws = &wsm[type].total_ws;
+	ws_wait = &wsm[type].ws_wait;
+	free_ws = &wsm[type].free_ws;
 
 again:
 	spin_lock(ws_lock);
@@ -892,10 +873,7 @@ again:
 	 * context of btrfs_compress_bio/btrfs_compress_pages
 	 */
 	nofs_flag = memalloc_nofs_save();
-	if (heuristic)
-		workspace = alloc_heuristic_ws();
-	else
-		workspace = btrfs_compress_op[idx]->alloc_workspace();
+	workspace = btrfs_compress_op[type]->alloc_workspace();
 	memalloc_nofs_restore(nofs_flag);
 
 	if (IS_ERR(workspace)) {
@@ -926,38 +904,23 @@ again:
 	return workspace;
 }
 
-static struct list_head *find_workspace(int type)
-{
-	return __find_workspace(type, false);
-}
-
 /*
  * put a workspace struct back on the list or free it if we have enough
  * idle ones sitting around
  */
-static void __free_workspace(int type, struct list_head *workspace,
-			     bool heuristic)
+static void free_workspace(int type, struct list_head *workspace)
 {
-	int idx = type - 1;
 	struct list_head *idle_ws;
 	spinlock_t *ws_lock;
 	atomic_t *total_ws;
 	wait_queue_head_t *ws_wait;
 	int *free_ws;
 
-	if (heuristic) {
-		idle_ws = &btrfs_heuristic_ws.idle_ws;
-		ws_lock = &btrfs_heuristic_ws.ws_lock;
-		total_ws = &btrfs_heuristic_ws.total_ws;
-		ws_wait = &btrfs_heuristic_ws.ws_wait;
-		free_ws = &btrfs_heuristic_ws.free_ws;
-	} else {
-		idle_ws = &wsm[idx].idle_ws;
-		ws_lock = &wsm[idx].ws_lock;
-		total_ws = &wsm[idx].total_ws;
-		ws_wait = &wsm[idx].ws_wait;
-		free_ws = &wsm[idx].free_ws;
-	}
+	idle_ws = &wsm[type].idle_ws;
+	ws_lock = &wsm[type].ws_lock;
+	total_ws = &wsm[type].total_ws;
+	ws_wait = &wsm[type].ws_wait;
+	free_ws = &wsm[type].free_ws;
 
 	spin_lock(ws_lock);
 	if (*free_ws <= num_online_cpus()) {
@@ -968,20 +931,12 @@ static void __free_workspace(int type, struct list_head *workspace,
 	}
 	spin_unlock(ws_lock);
 
-	if (heuristic)
-		free_heuristic_ws(workspace);
-	else
-		btrfs_compress_op[idx]->free_workspace(workspace);
+	btrfs_compress_op[type]->free_workspace(workspace);
 	atomic_dec(total_ws);
 wake:
 	cond_wake_up(ws_wait);
 }
 
-static void free_workspace(int type, struct list_head *ws)
-{
-	return __free_workspace(type, ws, false);
-}
-
 /*
  * cleanup function for module exit
  */
@@ -990,14 +945,7 @@ static void free_workspaces(void)
 	struct list_head *workspace;
 	int i;
 
-	while (!list_empty(&btrfs_heuristic_ws.idle_ws)) {
-		workspace = btrfs_heuristic_ws.idle_ws.next;
-		list_del(workspace);
-		free_heuristic_ws(workspace);
-		atomic_dec(&btrfs_heuristic_ws.total_ws);
-	}
-
-	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
+	for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++) {
 		while (!list_empty(&wsm[i].idle_ws)) {
 			workspace = wsm[i].idle_ws.next;
 			list_del(workspace);
@@ -1042,8 +990,8 @@ int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
 
 	workspace = find_workspace(type);
 
-	btrfs_compress_op[type - 1]->set_level(workspace, type_level);
-	ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
+	btrfs_compress_op[type]->set_level(workspace, type_level);
+	ret = btrfs_compress_op[type]->compress_pages(workspace, mapping,
 						      start, pages,
 						      out_pages,
 						      total_in, total_out);
@@ -1072,7 +1020,7 @@ static int btrfs_decompress_bio(struct compressed_bio *cb)
 	int type = cb->compress_type;
 
 	workspace = find_workspace(type);
-	ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
+	ret = btrfs_compress_op[type]->decompress_bio(workspace, cb);
 	free_workspace(type, workspace);
 
 	return ret;
@@ -1091,7 +1039,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
 
 	workspace = find_workspace(type);
 
-	ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
+	ret = btrfs_compress_op[type]->decompress(workspace, data_in,
 						  dest_page, start_byte,
 						  srclen, destlen);
 
@@ -1512,7 +1460,7 @@ static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
  */
 int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
 {
-	struct list_head *ws_list = __find_workspace(0, true);
+	struct list_head *ws_list = find_workspace(0);
 	struct heuristic_ws *ws;
 	u32 i;
 	u8 byte;
@@ -1581,7 +1529,7 @@ int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
 	}
 
 out:
-	__free_workspace(0, ws_list, true);
+	free_workspace(0, ws_list);
 	return ret;
 }
 
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 004db0b3111b..9a0e73c65b87 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -132,6 +132,10 @@ struct btrfs_compress_op {
 	void (*set_level)(struct list_head *ws, unsigned int type);
 };
 
+/* The heuristic workspaces are managed via the 0th workspace manager */
+#define BTRFS_NR_WORKSPACE_MANAGERS	(BTRFS_COMPRESS_TYPES + 1)
+
+extern const struct btrfs_compress_op btrfs_heuristic_compress;
 extern const struct btrfs_compress_op btrfs_zlib_compress;
 extern const struct btrfs_compress_op btrfs_lzo_compress;
 extern const struct btrfs_compress_op btrfs_zstd_compress;