author      Gao Xiang <gaoxiang25@huawei.com>                  2018-07-26 08:22:07 -0400
committer   Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2018-07-27 11:24:10 -0400
commit      105d4ad857dcbf3dc1288f339c5b09dafbc8f923 (patch)
tree        1024b2a9b33a67d7a2a483cfe6e48c478c23e7e3
parent      3883a79abd02272222a214a5f84395d41eecdc84 (diff)
staging: erofs: introduce cached decompression
This patch adds an optional choice that users can enable to cache both incomplete ends of compressed clusters, as a complement to in-place decompression. It boosts random read performance, but costs more memory than in-place decompression alone.

Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
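For orientation, the caching policy introduced here boils down to the reserve_allocation flag that z_erofs_do_read_page() passes to grab_managed_cache_pages() in the unzip_vle.c hunks below: the first cluster touched by a request is always a caching candidate, and with bipolar caching so is any cluster whose logical start lies before the requested offset (the incomplete head end). What follows is only a minimal standalone sketch of that predicate, using simplified stand-ins (a CACHE_LVL macro and a local erofs_off_t typedef) rather than the kernel definitions:

#include <stdbool.h>
#include <stdio.h>

/* stand-in for EROFS_FS_ZIP_CACHE_LVL: 0 in-place only, 1 unipolar, 2 bipolar */
#define CACHE_LVL 2

typedef unsigned long long erofs_off_t;

/*
 * Decide whether missing compressed pages of a cluster should be reserved
 * in the managed cache (and later read through it) or only read in place.
 */
static bool reserve_allocation(bool initial, erofs_off_t cluster_la,
			       erofs_off_t cachedzone_la)
{
	return initial | (CACHE_LVL >= 2 ? cluster_la < cachedzone_la : 0);
}

int main(void)
{
	const erofs_off_t cachedzone_la = 32768;	/* start of the read */

	/* first cluster of the request: always a caching candidate */
	printf("%d\n", reserve_allocation(true, 28672, cachedzone_la));
	/* later cluster fully inside the request: decompressed in place */
	printf("%d\n", reserve_allocation(false, 36864, cachedzone_la));
	/* cluster starting before the request: cached only in bipolar mode */
	printf("%d\n", reserve_allocation(false, 28672, cachedzone_la));
	return 0;
}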
-rw-r--r--   drivers/staging/erofs/Kconfig        38
-rw-r--r--   drivers/staging/erofs/internal.h     26
-rw-r--r--   drivers/staging/erofs/super.c        73
-rw-r--r--   drivers/staging/erofs/unzip_vle.c   274
-rw-r--r--   drivers/staging/erofs/utils.c        17
5 files changed, 427 insertions(+), 1 deletion(-)
diff --git a/drivers/staging/erofs/Kconfig b/drivers/staging/erofs/Kconfig
index b55ce1cf3bc7..663b755bf2fb 100644
--- a/drivers/staging/erofs/Kconfig
+++ b/drivers/staging/erofs/Kconfig
@@ -101,3 +101,41 @@ config EROFS_FS_CLUSTER_PAGE_LIMIT
 	  than 2. Otherwise, the image cannot be mounted
 	  correctly on this kernel.
 
+choice
+	prompt "EROFS VLE Data Decompression mode"
+	depends on EROFS_FS_ZIP
+	default EROFS_FS_ZIP_CACHE_BIPOLAR
+	help
+	  EROFS supports three options for VLE decompression.
+	  "In-place Decompression Only" consumes the minimum memory
+	  with lowest random read.
+
+	  "Bipolar Cached Decompression" consumes the maximum memory
+	  with highest random read.
+
+	  If unsure, select "Bipolar Cached Decompression"
+
+config EROFS_FS_ZIP_NO_CACHE
+	bool "In-place Decompression Only"
+	help
+	  Read compressed data into page cache and do in-place
+	  decompression directly.
+
+config EROFS_FS_ZIP_CACHE_UNIPOLAR
+	bool "Unipolar Cached Decompression"
+	help
+	  For each request, it caches the last compressed page
+	  for further reading.
+	  It still decompresses in place for the rest compressed pages.
+
+config EROFS_FS_ZIP_CACHE_BIPOLAR
+	bool "Bipolar Cached Decompression"
+	help
+	  For each request, it caches the both end compressed pages
+	  for further reading.
+	  It still decompresses in place for the rest compressed pages.
+
+	  Recommended for performance priority.
+
+endchoice
+
diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
index 3adec7d95d3e..669f93ae6920 100644
--- a/drivers/staging/erofs/internal.h
+++ b/drivers/staging/erofs/internal.h
@@ -58,6 +58,18 @@ struct erofs_fault_info {
 };
 #endif
 
+#ifdef CONFIG_EROFS_FS_ZIP_CACHE_BIPOLAR
+#define EROFS_FS_ZIP_CACHE_LVL	(2)
+#elif defined(EROFS_FS_ZIP_CACHE_UNIPOLAR)
+#define EROFS_FS_ZIP_CACHE_LVL	(1)
+#else
+#define EROFS_FS_ZIP_CACHE_LVL	(0)
+#endif
+
+#if (!defined(EROFS_FS_HAS_MANAGED_CACHE) && (EROFS_FS_ZIP_CACHE_LVL > 0))
+#define EROFS_FS_HAS_MANAGED_CACHE
+#endif
+
 /* EROFS_SUPER_MAGIC_V1 to represent the whole file system */
 #define EROFS_SUPER_MAGIC	EROFS_SUPER_MAGIC_V1
 
@@ -82,6 +94,11 @@ struct erofs_sb_info {
 
 	/* the dedicated workstation for compression */
 	struct radix_tree_root workstn_tree;
+
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+	struct inode *managed_cache;
+#endif
+
 #endif
 
 	u32 build_time_nsec;
@@ -240,6 +257,15 @@ static inline void erofs_workstation_cleanup_all(struct super_block *sb)
 	erofs_shrink_workstation(EROFS_SB(sb), ~0UL, true);
 }
 
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+#define EROFS_UNALLOCATED_CACHED_PAGE	((void *)0x5F0EF00D)
+
+extern int try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
+					struct erofs_workgroup *egrp);
+extern int try_to_free_cached_page(struct address_space *mapping,
+				   struct page *page);
+#endif
+
 #endif
 
 /* we strictly follow PAGE_SIZE and no buffer head yet */
diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c
index 2bd433ab4c49..97da5c8a8ef3 100644
--- a/drivers/staging/erofs/super.c
+++ b/drivers/staging/erofs/super.c
@@ -256,6 +256,63 @@ static int parse_options(struct super_block *sb, char *options)
 	return 0;
 }
 
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+
+static const struct address_space_operations managed_cache_aops;
+
+static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
+{
+	int ret = 1;	/* 0 - busy */
+	struct address_space *const mapping = page->mapping;
+
+	BUG_ON(!PageLocked(page));
+	BUG_ON(mapping->a_ops != &managed_cache_aops);
+
+	if (PagePrivate(page))
+		ret = try_to_free_cached_page(mapping, page);
+
+	return ret;
+}
+
+static void managed_cache_invalidatepage(struct page *page,
+	unsigned int offset, unsigned int length)
+{
+	const unsigned int stop = length + offset;
+
+	BUG_ON(!PageLocked(page));
+
+	/* Check for overflow */
+	BUG_ON(stop > PAGE_SIZE || stop < length);
+
+	if (offset == 0 && stop == PAGE_SIZE)
+		while (!managed_cache_releasepage(page, GFP_NOFS))
+			cond_resched();
+}
+
+static const struct address_space_operations managed_cache_aops = {
+	.releasepage = managed_cache_releasepage,
+	.invalidatepage = managed_cache_invalidatepage,
+};
+
+static struct inode *erofs_init_managed_cache(struct super_block *sb)
+{
+	struct inode *inode = new_inode(sb);
+
+	if (unlikely(inode == NULL))
+		return ERR_PTR(-ENOMEM);
+
+	set_nlink(inode, 1);
+	inode->i_size = OFFSET_MAX;
+
+	inode->i_mapping->a_ops = &managed_cache_aops;
+	mapping_set_gfp_mask(inode->i_mapping,
+		GFP_NOFS | __GFP_HIGHMEM |
+		__GFP_MOVABLE | __GFP_NOFAIL);
+	return inode;
+}
+
+#endif
+
 static int erofs_read_super(struct super_block *sb,
 	const char *dev_name, void *data, int silent)
 {
@@ -307,6 +364,14 @@ static int erofs_read_super(struct super_block *sb,
 	INIT_RADIX_TREE(&sbi->workstn_tree, GFP_ATOMIC);
 #endif
 
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+	sbi->managed_cache = erofs_init_managed_cache(sb);
+	if (IS_ERR(sbi->managed_cache)) {
+		err = PTR_ERR(sbi->managed_cache);
+		goto err_init_managed_cache;
+	}
+#endif
+
 	/* get the root inode */
 	inode = erofs_iget(sb, ROOT_NID(sbi), true);
 	if (IS_ERR(inode)) {
@@ -361,6 +426,10 @@ err_isdir:
 	if (sb->s_root == NULL)
 		iput(inode);
 err_iget:
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+	iput(sbi->managed_cache);
+err_init_managed_cache:
+#endif
 err_parseopt:
 err_sbread:
 	sb->s_fs_info = NULL;
@@ -386,6 +455,10 @@ static void erofs_put_super(struct super_block *sb)
 	infoln("unmounted for %s", sbi->dev_name);
 	__putname(sbi->dev_name);
 
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+	iput(sbi->managed_cache);
+#endif
+
 	mutex_lock(&sbi->umount_mutex);
 
 #ifdef CONFIG_EROFS_FS_ZIP
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index f0ead60a8fee..7671fe8194ce 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -95,6 +95,111 @@ struct z_erofs_vle_work_builder {
 #define VLE_WORK_BUILDER_INIT()	\
 	{ .work = NULL, .role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED }
 
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+
+static bool grab_managed_cache_pages(struct address_space *mapping,
+				     erofs_blk_t start,
+				     struct page **compressed_pages,
+				     int clusterblks,
+				     bool reserve_allocation)
+{
+	bool noio = true;
+	unsigned int i;
+
+	/* TODO: optimize by introducing find_get_pages_range */
+	for (i = 0; i < clusterblks; ++i) {
+		struct page *page, *found;
+
+		if (READ_ONCE(compressed_pages[i]) != NULL)
+			continue;
+
+		page = found = find_get_page(mapping, start + i);
+		if (found == NULL) {
+			noio = false;
+			if (!reserve_allocation)
+				continue;
+			page = EROFS_UNALLOCATED_CACHED_PAGE;
+		}
+
+		if (NULL == cmpxchg(compressed_pages + i, NULL, page))
+			continue;
+
+		if (found != NULL)
+			put_page(found);
+	}
+	return noio;
+}
+
+/* called by erofs_shrinker to get rid of all compressed_pages */
+int try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
+				 struct erofs_workgroup *egrp)
+{
+	struct z_erofs_vle_workgroup *const grp =
+		container_of(egrp, struct z_erofs_vle_workgroup, obj);
+	struct address_space *const mapping = sbi->managed_cache->i_mapping;
+	const int clusterpages = erofs_clusterpages(sbi);
+	int i;
+
+	/*
+	 * refcount of workgroup is now freezed as 1,
+	 * therefore no need to worry about available decompression users.
+	 */
+	for (i = 0; i < clusterpages; ++i) {
+		struct page *page = grp->compressed_pages[i];
+
+		if (page == NULL || page->mapping != mapping)
+			continue;
+
+		/* block other users from reclaiming or migrating the page */
+		if (!trylock_page(page))
+			return -EBUSY;
+
+		/* barrier is implied in the following 'unlock_page' */
+		WRITE_ONCE(grp->compressed_pages[i], NULL);
+
+		set_page_private(page, 0);
+		ClearPagePrivate(page);
+
+		unlock_page(page);
+		put_page(page);
+	}
+	return 0;
+}
+
+int try_to_free_cached_page(struct address_space *mapping, struct page *page)
+{
+	struct erofs_sb_info *const sbi = EROFS_SB(mapping->host->i_sb);
+	const unsigned int clusterpages = erofs_clusterpages(sbi);
+
+	struct z_erofs_vle_workgroup *grp;
+	int ret = 0;	/* 0 - busy */
+
+	/* prevent the workgroup from being freed */
+	rcu_read_lock();
+	grp = (void *)page_private(page);
+
+	if (erofs_workgroup_try_to_freeze(&grp->obj, 1)) {
+		unsigned int i;
+
+		for (i = 0; i < clusterpages; ++i) {
+			if (grp->compressed_pages[i] == page) {
+				WRITE_ONCE(grp->compressed_pages[i], NULL);
+				ret = 1;
+				break;
+			}
+		}
+		erofs_workgroup_unfreeze(&grp->obj, 1);
+	}
+	rcu_read_unlock();
+
+	if (ret) {
+		ClearPagePrivate(page);
+		put_page(page);
+	}
+	return ret;
+}
+#endif
+
 /* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
 static inline bool try_to_reuse_as_compressed_page(
 	struct z_erofs_vle_work_builder *b,
@@ -463,6 +568,9 @@ struct z_erofs_vle_frontend {
 	z_erofs_vle_owned_workgrp_t owned_head;
 
 	bool initial;
+#if (EROFS_FS_ZIP_CACHE_LVL >= 2)
+	erofs_off_t cachedzone_la;
+#endif
 };
 
 #define VLE_FRONTEND_INIT(__i) { \
@@ -489,6 +597,12 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
 	bool tight = builder_is_followed(builder);
 	struct z_erofs_vle_work *work = builder->work;
 
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+	struct address_space *const mngda = sbi->managed_cache->i_mapping;
+	struct z_erofs_vle_workgroup *grp;
+	bool noio_outoforder;
+#endif
+
 	enum z_erofs_page_type page_type;
 	unsigned cur, end, spiltted, index;
 	int err;
@@ -529,6 +643,21 @@ repeat:
 	if (unlikely(err))
 		goto err_out;
 
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+	grp = fe->builder.grp;
+
+	/* let's do out-of-order decompression for noio */
+	noio_outoforder = grab_managed_cache_pages(mngda,
+		erofs_blknr(map->m_pa),
+		grp->compressed_pages, erofs_blknr(map->m_plen),
+		/* compressed page caching selection strategy */
+		fe->initial | (EROFS_FS_ZIP_CACHE_LVL >= 2 ?
+			map->m_la < fe->cachedzone_la : 0));
+
+	if (noio_outoforder && builder_is_followed(builder))
+		builder->role = Z_EROFS_VLE_WORK_PRIMARY;
+#endif
+
 	tight &= builder_is_followed(builder);
 	work = builder->work;
 hitted:
@@ -607,15 +736,39 @@ static inline void z_erofs_vle_read_endio(struct bio *bio)
 	const blk_status_t err = bio->bi_status;
 	unsigned i;
 	struct bio_vec *bvec;
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+	struct address_space *mngda = NULL;
+#endif
 
 	bio_for_each_segment_all(bvec, bio, i) {
 		struct page *page = bvec->bv_page;
+		bool cachemngd = false;
 
 		DBG_BUGON(PageUptodate(page));
 		BUG_ON(page->mapping == NULL);
 
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+		if (unlikely(mngda == NULL && !z_erofs_is_stagingpage(page))) {
+			struct inode *const inode = page->mapping->host;
+			struct super_block *const sb = inode->i_sb;
+
+			mngda = EROFS_SB(sb)->managed_cache->i_mapping;
+		}
+
+		/*
+		 * If mngda has not gotten, it equals NULL,
+		 * however, page->mapping never be NULL if working properly.
+		 */
+		cachemngd = (page->mapping == mngda);
+#endif
+
 		if (unlikely(err))
 			SetPageError(page);
+		else if (cachemngd)
+			SetPageUptodate(page);
+
+		if (cachemngd)
+			unlock_page(page);
 	}
 
 	z_erofs_vle_unzip_kickoff(bio->bi_private, -1);
@@ -630,6 +783,9 @@ static int z_erofs_vle_unzip(struct super_block *sb,
 	struct list_head *page_pool)
 {
 	struct erofs_sb_info *const sbi = EROFS_SB(sb);
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+	struct address_space *const mngda = sbi->managed_cache->i_mapping;
+#endif
 	const unsigned clusterpages = erofs_clusterpages(sbi);
 
 	struct z_erofs_pagevec_ctor ctor;
@@ -727,6 +883,13 @@ repeat:
 
 		if (z_erofs_is_stagingpage(page))
 			continue;
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+		else if (page->mapping == mngda) {
+			BUG_ON(PageLocked(page));
+			BUG_ON(!PageUptodate(page));
+			continue;
+		}
+#endif
 
 		/* only non-head page could be reused as a compressed page */
 		pagenr = z_erofs_onlinepage_index(page);
@@ -804,6 +967,10 @@ out_percpu:
 	for (i = 0; i < clusterpages; ++i) {
 		page = compressed_pages[i];
 
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+		if (page->mapping == mngda)
+			continue;
+#endif
 		/* recycle all individual staging pages */
 		(void)z_erofs_gather_if_stagingpage(page_pool, page);
 
@@ -898,7 +1065,31 @@ out:
 	return io;
 }
 
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+/* true - unlocked (noio), false - locked (need submit io) */
+static inline bool recover_managed_page(struct z_erofs_vle_workgroup *grp,
+					struct page *page)
+{
+	wait_on_page_locked(page);
+	if (PagePrivate(page) && PageUptodate(page))
+		return true;
+
+	lock_page(page);
+	if (unlikely(!PagePrivate(page))) {
+		set_page_private(page, (unsigned long)grp);
+		SetPagePrivate(page);
+	}
+	if (unlikely(PageUptodate(page))) {
+		unlock_page(page);
+		return true;
+	}
+	return false;
+}
+
+#define __FSIO_1 1
+#else
 #define __FSIO_1 0
+#endif
 
 static bool z_erofs_vle_submit_all(struct super_block *sb,
 	z_erofs_vle_owned_workgrp_t owned_head,
@@ -909,6 +1100,10 @@ static bool z_erofs_vle_submit_all(struct super_block *sb,
 	struct erofs_sb_info *const sbi = EROFS_SB(sb);
 	const unsigned clusterpages = erofs_clusterpages(sbi);
 	const gfp_t gfp = GFP_NOFS;
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+	struct address_space *const mngda = sbi->managed_cache->i_mapping;
+	struct z_erofs_vle_workgroup *lstgrp_noio = NULL, *lstgrp_io = NULL;
+#endif
 	struct z_erofs_vle_unzip_io *ios[1 + __FSIO_1];
 	struct bio *bio;
 	tagptr1_t bi_private;
@@ -924,6 +1119,10 @@ static bool z_erofs_vle_submit_all(struct super_block *sb,
 	 * force_fg == 1, (io, fg_io[0]) no io, (io, fg_io[1]) need submit io
 	 * force_fg == 0, (io, fg_io[0]) no io; (io[1], bg_io) need submit io
 	 */
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+	ios[0] = prepare_io_handler(sb, fg_io + 0, false);
+#endif
+
 	if (force_fg) {
 		ios[__FSIO_1] = prepare_io_handler(sb, fg_io + __FSIO_1, false);
 		bi_private = tagptr_fold(tagptr1_t, ios[__FSIO_1], 0);
@@ -944,6 +1143,10 @@ static bool z_erofs_vle_submit_all(struct super_block *sb,
 		struct page **compressed_pages, *oldpage, *page;
 		pgoff_t first_index;
 		unsigned i = 0;
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+		unsigned int noio = 0;
+		bool cachemngd;
+#endif
 		int err;
 
 		/* no possible 'owned_head' equals the following */
@@ -964,15 +1167,40 @@ repeat:
 		/* fulfill all compressed pages */
 		oldpage = page = READ_ONCE(compressed_pages[i]);
 
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+		cachemngd = false;
+
+		if (page == EROFS_UNALLOCATED_CACHED_PAGE) {
+			cachemngd = true;
+			goto do_allocpage;
+		} else if (page != NULL) {
+			if (page->mapping != mngda)
+				BUG_ON(PageUptodate(page));
+			else if (recover_managed_page(grp, page)) {
+				/* page is uptodate, skip io submission */
+				force_submit = true;
+				++noio;
+				goto skippage;
+			}
+		} else {
+do_allocpage:
+#else
 		if (page != NULL)
 			BUG_ON(PageUptodate(page));
 		else {
+#endif
 			page = __stagingpage_alloc(pagepool, gfp);
 
 			if (oldpage != cmpxchg(compressed_pages + i,
 				oldpage, page)) {
 				list_add(&page->lru, pagepool);
 				goto repeat;
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+			} else if (cachemngd && !add_to_page_cache_lru(page,
+				mngda, first_index + i, gfp)) {
+				set_page_private(page, (unsigned long)grp);
+				SetPagePrivate(page);
+#endif
 			}
 		}
 
@@ -996,14 +1224,51 @@ submit_bio_retry:
 
 		force_submit = false;
 		last_index = first_index + i;
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+skippage:
+#endif
 		if (++i < clusterpages)
 			goto repeat;
+
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+		if (noio < clusterpages) {
+			lstgrp_io = grp;
+		} else {
+			z_erofs_vle_owned_workgrp_t iogrp_next =
+				owned_head == Z_EROFS_VLE_WORKGRP_TAIL ?
+				Z_EROFS_VLE_WORKGRP_TAIL_CLOSED :
+				owned_head;
+
+			if (lstgrp_io == NULL)
+				ios[1]->head = iogrp_next;
+			else
+				WRITE_ONCE(lstgrp_io->next, iogrp_next);
+
+			if (lstgrp_noio == NULL)
+				ios[0]->head = grp;
+			else
+				WRITE_ONCE(lstgrp_noio->next, grp);
+
+			lstgrp_noio = grp;
+		}
+#endif
 	} while (owned_head != Z_EROFS_VLE_WORKGRP_TAIL);
 
 	if (bio != NULL)
 		__submit_bio(bio, REQ_OP_READ, 0);
 
+#ifndef EROFS_FS_HAS_MANAGED_CACHE
 	BUG_ON(!nr_bios);
+#else
+	if (lstgrp_noio != NULL)
+		WRITE_ONCE(lstgrp_noio->next, Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
+
+	if (!force_fg && !nr_bios) {
+		kvfree(container_of(ios[1],
+			struct z_erofs_vle_unzip_io_sb, io));
+		return true;
+	}
+#endif
 
 	z_erofs_vle_unzip_kickoff(tagptr_cast_ptr(bi_private), nr_bios);
 	return true;
@@ -1019,6 +1284,9 @@ static void z_erofs_submit_and_unzip(struct z_erofs_vle_frontend *f,
 	if (!z_erofs_vle_submit_all(sb, f->owned_head, pagepool, io, force_fg))
 		return;
 
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+	z_erofs_vle_unzip_all(sb, &io[0], pagepool);
+#endif
 	if (!force_fg)
 		return;
 
@@ -1038,6 +1306,9 @@ static int z_erofs_vle_normalaccess_readpage(struct file *file,
 	int err;
 	LIST_HEAD(pagepool);
 
+#if (EROFS_FS_ZIP_CACHE_LVL >= 2)
+	f.cachedzone_la = page->index << PAGE_SHIFT;
+#endif
 	err = z_erofs_do_read_page(&f, page, &pagepool);
 	(void)z_erofs_vle_work_iter_end(&f.builder);
 
@@ -1068,6 +1339,9 @@ static inline int __z_erofs_vle_normalaccess_readpages(
 	struct page *head = NULL;
 	LIST_HEAD(pagepool);
 
+#if (EROFS_FS_ZIP_CACHE_LVL >= 2)
+	f.cachedzone_la = lru_to_page(pages)->index << PAGE_SHIFT;
+#endif
 	for (; nr_pages; --nr_pages) {
 		struct page *page = lru_to_page(pages);
 
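The submission loop above effectively builds two queues of clusters: those whose compressed pages were all found up to date in the managed cache are chained onto ios[0] and decompressed without submitting any bio, while the rest stay on ios[1] and get read I/O first. A simplified standalone sketch of that bookkeeping, using hypothetical struct cluster/struct queue types in place of the workgroup machinery:

#include <stdbool.h>
#include <stdio.h>

struct cluster {
	struct cluster *next;
	bool fully_cached;	/* corresponds to noio == clusterpages above */
	int id;
};

struct queue {
	struct cluster *head, *tail;
};

/* append a cluster to a singly linked queue */
static void enqueue(struct queue *q, struct cluster *c)
{
	c->next = NULL;
	if (q->tail)
		q->tail->next = c;
	else
		q->head = c;
	q->tail = c;
}

int main(void)
{
	struct cluster clusters[4] = {
		{ .id = 0, .fully_cached = false },
		{ .id = 1, .fully_cached = true },
		{ .id = 2, .fully_cached = false },
		{ .id = 3, .fully_cached = true },
	};
	struct queue io = { 0 }, noio = { 0 };
	struct cluster *c;
	int i;

	for (i = 0; i < 4; ++i)
		enqueue(clusters[i].fully_cached ? &noio : &io, &clusters[i]);

	printf("decompress without I/O:");
	for (c = noio.head; c; c = c->next)
		printf(" %d", c->id);
	printf("\nsubmit bios for:");
	for (c = io.head; c; c = c->next)
		printf(" %d", c->id);
	printf("\n");
	return 0;
}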
diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c
index 6530035f8a61..ee70bb9e1636 100644
--- a/drivers/staging/erofs/utils.c
+++ b/drivers/staging/erofs/utils.c
@@ -143,13 +143,28 @@ repeat:
 		if (cleanup)
 			BUG_ON(cnt != 1);
 
+#ifndef EROFS_FS_HAS_MANAGED_CACHE
 		else if (cnt > 1)
+#else
+		if (!erofs_workgroup_try_to_freeze(grp, 1))
+#endif
 			continue;
 
 		if (radix_tree_delete(&sbi->workstn_tree,
-			grp->index) != grp)
+			grp->index) != grp) {
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+skip:
+			erofs_workgroup_unfreeze(grp, 1);
+#endif
 			continue;
+		}
 
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+		if (try_to_free_all_cached_pages(sbi, grp))
+			goto skip;
+
+		erofs_workgroup_unfreeze(grp, 1);
+#endif
 		/* (rarely) grabbed again when freeing */
 		erofs_workgroup_put(grp);
 