aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorGao Xiang <gaoxiang25@huawei.com>2018-07-26 08:22:05 -0400
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2018-07-27 11:24:10 -0400
commite7e9a307be9d75ecc3bf20b362af88140dfb4304 (patch)
treea640d916a61f5945c0f1f204fbdeafbd6471d330
parenta15813126272e5f81311e5e1330162baa40e5b0a (diff)
staging: erofs: introduce workstation for decompression
This patch introduces another concept used by the unzip subsystem called 'workstation'. It can be seen as a sparse array that stores pointers pointing to data structures related to the corresponding physical blocks. All lookup cases are protected by RCU read lock. Besides, reference count and spin_lock are also introduced to manage its lifetime and serialize all update operations. 'workstation' is currently implemented on the in-kernel radix tree approach for backward compatibility. With the evolution of linux kernel, it could be migrated into XArray implementation in the future. Signed-off-by: Gao Xiang <gaoxiang25@huawei.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--drivers/staging/erofs/internal.h93
-rw-r--r--drivers/staging/erofs/super.c9
-rw-r--r--drivers/staging/erofs/utils.c81
3 files changed, 180 insertions, 3 deletions
diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
index 42455f01c421..b07cd7aa0a09 100644
--- a/drivers/staging/erofs/internal.h
+++ b/drivers/staging/erofs/internal.h
@@ -79,6 +79,9 @@ struct erofs_sb_info {
79#ifdef CONFIG_EROFS_FS_ZIP 79#ifdef CONFIG_EROFS_FS_ZIP
80 /* cluster size in bit shift */ 80 /* cluster size in bit shift */
81 unsigned char clusterbits; 81 unsigned char clusterbits;
82
83 /* the dedicated workstation for compression */
84 struct radix_tree_root workstn_tree;
82#endif 85#endif
83 86
84 u32 build_time_nsec; 87 u32 build_time_nsec;
@@ -149,6 +152,96 @@ static inline void *erofs_kmalloc(struct erofs_sb_info *sbi,
149#define set_opt(sbi, option) ((sbi)->mount_opt |= EROFS_MOUNT_##option) 152#define set_opt(sbi, option) ((sbi)->mount_opt |= EROFS_MOUNT_##option)
150#define test_opt(sbi, option) ((sbi)->mount_opt & EROFS_MOUNT_##option) 153#define test_opt(sbi, option) ((sbi)->mount_opt & EROFS_MOUNT_##option)
151 154
155#ifdef CONFIG_EROFS_FS_ZIP
156#define erofs_workstn_lock(sbi) xa_lock(&(sbi)->workstn_tree)
157#define erofs_workstn_unlock(sbi) xa_unlock(&(sbi)->workstn_tree)
158
159/* basic unit of the workstation of a super_block */
160struct erofs_workgroup {
161 /* the workgroup index in the workstation */
162 pgoff_t index;
163
164 /* overall workgroup reference count */
165 atomic_t refcount;
166};
167
168#define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL)
169
170static inline bool erofs_workgroup_try_to_freeze(
171 struct erofs_workgroup *grp, int v)
172{
173#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
174 if (v != atomic_cmpxchg(&grp->refcount,
175 v, EROFS_LOCKED_MAGIC))
176 return false;
177 preempt_disable();
178#else
179 preempt_disable();
180 if (atomic_read(&grp->refcount) != v) {
181 preempt_enable();
182 return false;
183 }
184#endif
185 return true;
186}
187
/*
 * Undo erofs_workgroup_try_to_freeze(): restore the reference
 * count to @v and re-enable preemption.
 */
static inline void erofs_workgroup_unfreeze(
	struct erofs_workgroup *grp, int v)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	/* the count currently holds EROFS_LOCKED_MAGIC; put @v back */
	atomic_set(&grp->refcount, v);
#endif
	preempt_enable();
}
196
197static inline bool erofs_workgroup_get(struct erofs_workgroup *grp, int *ocnt)
198{
199 const int locked = (int)EROFS_LOCKED_MAGIC;
200 int o;
201
202repeat:
203 o = atomic_read(&grp->refcount);
204
205 /* spin if it is temporarily locked at the reclaim path */
206 if (unlikely(o == locked)) {
207#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
208 do
209 cpu_relax();
210 while (atomic_read(&grp->refcount) == locked);
211#endif
212 goto repeat;
213 }
214
215 if (unlikely(o <= 0))
216 return -1;
217
218 if (unlikely(atomic_cmpxchg(&grp->refcount, o, o + 1) != o))
219 goto repeat;
220
221 *ocnt = o;
222 return 0;
223}
224
225#define __erofs_workgroup_get(grp) atomic_inc(&(grp)->refcount)
226
227extern int erofs_workgroup_put(struct erofs_workgroup *grp);
228
229extern struct erofs_workgroup *erofs_find_workgroup(
230 struct super_block *sb, pgoff_t index, bool *tag);
231
232extern int erofs_register_workgroup(struct super_block *sb,
233 struct erofs_workgroup *grp, bool tag);
234
235extern unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
236 unsigned long nr_shrink, bool cleanup);
237
238static inline void erofs_workstation_cleanup_all(struct super_block *sb)
239{
240 erofs_shrink_workstation(EROFS_SB(sb), ~0UL, true);
241}
242
243#endif
244
152/* we strictly follow PAGE_SIZE and no buffer head yet */ 245/* we strictly follow PAGE_SIZE and no buffer head yet */
153#define LOG_BLOCK_SIZE PAGE_SHIFT 246#define LOG_BLOCK_SIZE PAGE_SHIFT
154 247
diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c
index ef85884d47fb..e155a2b0d43e 100644
--- a/drivers/staging/erofs/super.c
+++ b/drivers/staging/erofs/super.c
@@ -296,6 +296,10 @@ static int erofs_read_super(struct super_block *sb,
296 if (!silent) 296 if (!silent)
297 infoln("root inode @ nid %llu", ROOT_NID(sbi)); 297 infoln("root inode @ nid %llu", ROOT_NID(sbi));
298 298
299#ifdef CONFIG_EROFS_FS_ZIP
300 INIT_RADIX_TREE(&sbi->workstn_tree, GFP_ATOMIC);
301#endif
302
299 /* get the root inode */ 303 /* get the root inode */
300 inode = erofs_iget(sb, ROOT_NID(sbi), true); 304 inode = erofs_iget(sb, ROOT_NID(sbi), true);
301 if (IS_ERR(inode)) { 305 if (IS_ERR(inode)) {
@@ -376,6 +380,11 @@ static void erofs_put_super(struct super_block *sb)
376 __putname(sbi->dev_name); 380 __putname(sbi->dev_name);
377 381
378 mutex_lock(&sbi->umount_mutex); 382 mutex_lock(&sbi->umount_mutex);
383
384#ifdef CONFIG_EROFS_FS_ZIP
385 erofs_workstation_cleanup_all(sb);
386#endif
387
379 erofs_unregister_super(sb); 388 erofs_unregister_super(sb);
380 mutex_unlock(&sbi->umount_mutex); 389 mutex_unlock(&sbi->umount_mutex);
381 390
diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c
index c1d83cecfec6..0d4eae2f79a8 100644
--- a/drivers/staging/erofs/utils.c
+++ b/drivers/staging/erofs/utils.c
@@ -29,6 +29,83 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
29 return page; 29 return page;
30} 30}
31 31
32/* global shrink count (for all mounted EROFS instances) */
33static atomic_long_t erofs_global_shrink_cnt;
34
35#ifdef CONFIG_EROFS_FS_ZIP
36
37/* radix_tree and the future XArray both don't use tagptr_t yet */
38struct erofs_workgroup *erofs_find_workgroup(
39 struct super_block *sb, pgoff_t index, bool *tag)
40{
41 struct erofs_sb_info *sbi = EROFS_SB(sb);
42 struct erofs_workgroup *grp;
43 int oldcount;
44
45repeat:
46 rcu_read_lock();
47 grp = radix_tree_lookup(&sbi->workstn_tree, index);
48 if (grp != NULL) {
49 *tag = radix_tree_exceptional_entry(grp);
50 grp = (void *)((unsigned long)grp &
51 ~RADIX_TREE_EXCEPTIONAL_ENTRY);
52
53 if (erofs_workgroup_get(grp, &oldcount)) {
54 /* prefer to relax rcu read side */
55 rcu_read_unlock();
56 goto repeat;
57 }
58
59 /* decrease refcount added by erofs_workgroup_put */
60 if (unlikely(oldcount == 1))
61 atomic_long_dec(&erofs_global_shrink_cnt);
62 BUG_ON(index != grp->index);
63 }
64 rcu_read_unlock();
65 return grp;
66}
67
68int erofs_register_workgroup(struct super_block *sb,
69 struct erofs_workgroup *grp,
70 bool tag)
71{
72 struct erofs_sb_info *sbi;
73 int err;
74
75 /* grp->refcount should not < 1 */
76 BUG_ON(!atomic_read(&grp->refcount));
77
78 err = radix_tree_preload(GFP_NOFS);
79 if (err)
80 return err;
81
82 sbi = EROFS_SB(sb);
83 erofs_workstn_lock(sbi);
84
85 if (tag)
86 grp = (void *)((unsigned long)grp |
87 1UL << RADIX_TREE_EXCEPTIONAL_SHIFT);
88
89 err = radix_tree_insert(&sbi->workstn_tree,
90 grp->index, grp);
91
92 if (!err) {
93 __erofs_workgroup_get(grp);
94 }
95
96 erofs_workstn_unlock(sbi);
97 radix_tree_preload_end();
98 return err;
99}
100
101unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
102 unsigned long nr_shrink,
103 bool cleanup)
104{
105 return 0;
106}
107
108#endif
32 109
33/* protected by 'erofs_sb_list_lock' */ 110/* protected by 'erofs_sb_list_lock' */
34static unsigned int shrinker_run_no; 111static unsigned int shrinker_run_no;
@@ -37,9 +114,6 @@ static unsigned int shrinker_run_no;
37static DEFINE_SPINLOCK(erofs_sb_list_lock); 114static DEFINE_SPINLOCK(erofs_sb_list_lock);
38static LIST_HEAD(erofs_sb_list); 115static LIST_HEAD(erofs_sb_list);
39 116
40/* global shrink count (for all mounted EROFS instances) */
41static atomic_long_t erofs_global_shrink_cnt;
42
43void erofs_register_super(struct super_block *sb) 117void erofs_register_super(struct super_block *sb)
44{ 118{
45 struct erofs_sb_info *sbi = EROFS_SB(sb); 119 struct erofs_sb_info *sbi = EROFS_SB(sb);
@@ -112,6 +186,7 @@ unsigned long erofs_shrink_scan(struct shrinker *shrink,
112 list_move_tail(&sbi->list, &erofs_sb_list); 186 list_move_tail(&sbi->list, &erofs_sb_list);
113 mutex_unlock(&sbi->umount_mutex); 187 mutex_unlock(&sbi->umount_mutex);
114 188
189 freed += erofs_shrink_workstation(sbi, nr, false);
115 if (freed >= nr) 190 if (freed >= nr)
116 break; 191 break;
117 } 192 }