| field | value | date |
|---|---|---|
| author | Vladimir Davydov <vdavydov@parallels.com> | 2015-04-14 18:46:48 -0400 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-04-14 19:49:03 -0400 |
| commit | 3cb29d11174f29b76addcba4374884b14f8ea4b1 (patch) | |
| tree | d73223beced01147ddeebf3a78ccc6ab4f8ffe15 | |
| parent | 53d85c98566535d7bc69470f008d7dcb09a0fec9 (diff) | |
cleancache: remove limit on the number of cleancache enabled filesystems
The limit is 32, imposed by the number of entries in fs_poolid_map and
shared_fs_poolid_map. Nowadays that is insufficient: with containers on
board, a Linux host can have hundreds of active fs mounts.
These maps were introduced by commit 49a9ab815acb8 ("mm: cleancache:
lazy initialization to allow tmem backends to build/run as modules") so
that cleancache drivers could be compiled as modules. The real pool ids
are stored in these maps, while super_block->cleancache_poolid points to
a map entry, so that on cleancache registration we can walk over all
cleancache-enabled super blocks (provided there are at most 32 of them)
and assign them real pool ids, as sketched below.
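For reference, the indirection being removed looks roughly like this (a
condensed sketch of the code deleted from mm/cleancache.c below, with the
uuid bookkeeping and mutex locking omitted):

```c
/* Condensed sketch of the old scheme, not the verbatim kernel code:
 * each super block stores a "fake" pool id, an offset into one of two
 * fixed-size arrays that hold the real backend pool ids.
 */
#define MAX_INITIALIZABLE_FS            32      /* the limit in question */
#define FAKE_FS_POOLID_OFFSET           1000
#define FAKE_SHARED_FS_POOLID_OFFSET    2000
#define FS_NO_BACKEND                   (-1)

static int fs_poolid_map[MAX_INITIALIZABLE_FS];        /* real pool ids */
static int shared_fs_poolid_map[MAX_INITIALIZABLE_FS]; /* ditto, shared fs */

/* Every cleancache operation translates the fake id before use. */
static int get_poolid_from_fake(int fake_pool_id)
{
        if (fake_pool_id >= FAKE_SHARED_FS_POOLID_OFFSET)
                return shared_fs_poolid_map[fake_pool_id -
                                            FAKE_SHARED_FS_POOLID_OFFSET];
        else if (fake_pool_id >= FAKE_FS_POOLID_OFFSET)
                return fs_poolid_map[fake_pool_id - FAKE_FS_POOLID_OFFSET];
        return FS_NO_BACKEND;
}
```

Both arrays are capped at 32 entries, which is exactly where the limit
comes from.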
Actually, there is no need for these maps at all: we can iterate over
all super blocks directly using iterate_supers. This is not racy,
because cleancache_init_fs is called from mount_fs with
super_block->s_umount held for writing, while iterate_supers takes this
semaphore for reading. So if we call iterate_supers after setting
cleancache_ops, all super blocks created before cleancache_register_ops
was called will be assigned pool ids by the action function of
iterate_supers, while all newer super blocks will receive theirs in
cleancache_init_fs.
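In code, the replacement boils down to the following (condensed from the
hunks below; the long correctness comment carried by the real patch is
elided here):

```c
/* Condensed from the patch: registration claims the single backend
 * slot atomically, then walks every active super block instead of
 * consulting pool-id maps.
 */
static void cleancache_register_ops_sb(struct super_block *sb, void *unused)
{
        /* The negative marker left by a backend-less mount tells us
         * which flavour of init to run now. */
        switch (sb->cleancache_poolid) {
        case CLEANCACHE_NO_BACKEND:
                __cleancache_init_fs(sb);
                break;
        case CLEANCACHE_NO_BACKEND_SHARED:
                __cleancache_init_shared_fs(sb);
                break;
        }
}

int cleancache_register_ops(struct cleancache_ops *ops)
{
        if (cmpxchg(&cleancache_ops, NULL, ops))
                return -EBUSY;  /* a backend is already registered */
        /* iterate_supers takes each sb->s_umount for reading, so this
         * cannot race with ->mount, which holds it for writing. */
        iterate_supers(cleancache_register_ops_sb, NULL);
        return 0;
}
```

Super blocks mounted while the walk is in flight see cleancache_ops
already set and initialize themselves from ->mount, so each filesystem
gets its pool id exactly once.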
This patch therefore removes the maps and hence the artificial limit on
the number of cleancache-enabled filesystems.
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Stefan Hengelein <ilendir@googlemail.com>
Cc: Florian Schmaus <fschmaus@gmail.com>
Cc: Andor Daam <andor.daam@googlemail.com>
Cc: Dan Magenheimer <dan.magenheimer@oracle.com>
Cc: Bob Liu <lliubbo@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  fs/super.c                 |   2
-rw-r--r--  include/linux/cleancache.h |   4
-rw-r--r--  mm/cleancache.c            | 270
3 files changed, 94 insertions, 182 deletions
diff --git a/fs/super.c b/fs/super.c
index 2b7dc90ccdbb..928c20f47af9 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -224,7 +224,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
         s->s_maxbytes = MAX_NON_LFS;
         s->s_op = &default_op;
         s->s_time_gran = 1000000000;
-        s->cleancache_poolid = -1;
+        s->cleancache_poolid = CLEANCACHE_NO_POOL;
 
         s->s_shrink.seeks = DEFAULT_SEEKS;
         s->s_shrink.scan_objects = super_cache_scan;
diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
index b23611f43cfb..bda5ec0b4b4d 100644
--- a/include/linux/cleancache.h
+++ b/include/linux/cleancache.h
@@ -5,6 +5,10 @@
 #include <linux/exportfs.h>
 #include <linux/mm.h>
 
+#define CLEANCACHE_NO_POOL            -1
+#define CLEANCACHE_NO_BACKEND         -2
+#define CLEANCACHE_NO_BACKEND_SHARED  -3
+
 #define CLEANCACHE_KEY_MAX 6
 
 /*
diff --git a/mm/cleancache.c b/mm/cleancache.c
index aa10f9a3bc88..8fc50811119b 100644
--- a/mm/cleancache.c
+++ b/mm/cleancache.c
@@ -19,7 +19,7 @@
 #include <linux/cleancache.h>
 
 /*
- * cleancache_ops is set by cleancache_ops_register to contain the pointers
+ * cleancache_ops is set by cleancache_register_ops to contain the pointers
  * to the cleancache "backend" implementation functions.
  */
 static struct cleancache_ops *cleancache_ops __read_mostly;
@@ -34,104 +34,78 @@ static u64 cleancache_failed_gets;
 static u64 cleancache_puts;
 static u64 cleancache_invalidates;
 
-/*
- * When no backend is registered all calls to init_fs and init_shared_fs
- * are registered and fake poolids (FAKE_FS_POOLID_OFFSET or
- * FAKE_SHARED_FS_POOLID_OFFSET, plus offset in the respective array
- * [shared_|]fs_poolid_map) are given to the respective super block
- * (sb->cleancache_poolid) and no tmem_pools are created. When a backend
- * registers with cleancache the previous calls to init_fs and init_shared_fs
- * are executed to create tmem_pools and set the respective poolids. While no
- * backend is registered all "puts", "gets" and "flushes" are ignored or failed.
- */
-#define MAX_INITIALIZABLE_FS 32
-#define FAKE_FS_POOLID_OFFSET 1000
-#define FAKE_SHARED_FS_POOLID_OFFSET 2000
-
-#define FS_NO_BACKEND (-1)
-#define FS_UNKNOWN (-2)
-static int fs_poolid_map[MAX_INITIALIZABLE_FS];
-static int shared_fs_poolid_map[MAX_INITIALIZABLE_FS];
-static char *uuids[MAX_INITIALIZABLE_FS];
-/*
- * Mutex for the [shared_|]fs_poolid_map to guard against multiple threads
- * invoking umount (and ending in __cleancache_invalidate_fs) and also multiple
- * threads calling mount (and ending up in __cleancache_init_[shared|]fs).
- */
-static DEFINE_MUTEX(poolid_mutex);
-/*
- * When set to false (default) all calls to the cleancache functions, except
- * the __cleancache_invalidate_fs and __cleancache_init_[shared|]fs are guarded
- * by the if (!cleancache_ops) return. This means multiple threads (from
- * different filesystems) will be checking cleancache_ops. The usage of a
- * bool instead of a atomic_t or a bool guarded by a spinlock is OK - we are
- * OK if the time between the backend's have been initialized (and
- * cleancache_ops has been set to not NULL) and when the filesystems start
- * actually calling the backends. The inverse (when unloading) is obviously
- * not good - but this shim does not do that (yet).
- */
-
-/*
- * The backends and filesystems work all asynchronously. This is b/c the
- * backends can be built as modules.
- * The usual sequence of events is:
- *      a) mount /      -> __cleancache_init_fs is called. We set the
- *              [shared_|]fs_poolid_map and uuids for.
- *
- *      b). user does I/Os -> we call the rest of __cleancache_* functions
- *              which return immediately as cleancache_ops is false.
- *
- *      c). modprobe zcache -> cleancache_register_ops. We init the backend
- *              and set cleancache_ops to true, and for any fs_poolid_map
- *              (which is set by __cleancache_init_fs) we initialize the poolid.
- *
- *      d). user does I/Os -> now that cleancache_ops is true all the
- *              __cleancache_* functions can call the backend. They all check
- *              that fs_poolid_map is valid and if so invoke the backend.
- *
- *      e). umount / -> __cleancache_invalidate_fs, the fs_poolid_map is
- *              reset (which is the second check in the __cleancache_* ops
- *              to call the backend).
- *
- * The sequence of event could also be c), followed by a), and d). and e). The
- * c) would not happen anymore. There is also the chance of c), and one thread
- * doing a) + d), and another doing e). For that case we depend on the
- * filesystem calling __cleancache_invalidate_fs in the proper sequence (so
- * that it handles all I/Os before it invalidates the fs (which is last part
- * of unmounting process).
- *
- * Note: The acute reader will notice that there is no "rmmod zcache" case.
- * This is b/c the functionality for that is not yet implemented and when
- * done, will require some extra locking not yet devised.
- */
+static void cleancache_register_ops_sb(struct super_block *sb, void *unused)
+{
+        switch (sb->cleancache_poolid) {
+        case CLEANCACHE_NO_BACKEND:
+                __cleancache_init_fs(sb);
+                break;
+        case CLEANCACHE_NO_BACKEND_SHARED:
+                __cleancache_init_shared_fs(sb);
+                break;
+        }
+}
 
 /*
  * Register operations for cleancache. Returns 0 on success.
  */
 int cleancache_register_ops(struct cleancache_ops *ops)
 {
-        int i;
-
-        mutex_lock(&poolid_mutex);
-        if (cleancache_ops) {
-                mutex_unlock(&poolid_mutex);
+        if (cmpxchg(&cleancache_ops, NULL, ops))
                 return -EBUSY;
-        }
-        for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
-                if (fs_poolid_map[i] == FS_NO_BACKEND)
-                        fs_poolid_map[i] = ops->init_fs(PAGE_SIZE);
-                if (shared_fs_poolid_map[i] == FS_NO_BACKEND)
-                        shared_fs_poolid_map[i] = ops->init_shared_fs
-                                        (uuids[i], PAGE_SIZE);
-        }
+
         /*
-         * We MUST set cleancache_ops _after_ we have called the backends
-         * init_fs or init_shared_fs functions. Otherwise the compiler might
-         * re-order where cleancache_ops is set in this function.
+         * A cleancache backend can be built as a module and hence loaded after
+         * a cleancache enabled filesystem has called cleancache_init_fs. To
+         * handle such a scenario, here we call ->init_fs or ->init_shared_fs
+         * for each active super block. To differentiate between local and
+         * shared filesystems, we temporarily initialize sb->cleancache_poolid
+         * to CLEANCACHE_NO_BACKEND or CLEANCACHE_NO_BACKEND_SHARED
+         * respectively in case there is no backend registered at the time
+         * cleancache_init_fs or cleancache_init_shared_fs is called.
+         *
+         * Since filesystems can be mounted concurrently with cleancache
+         * backend registration, we have to be careful to guarantee that all
+         * cleancache enabled filesystems that has been mounted by the time
+         * cleancache_register_ops is called has got and all mounted later will
+         * get cleancache_poolid. This is assured by the following statements
+         * tied together:
+         *
+         * a) iterate_supers skips only those super blocks that has started
+         *    ->kill_sb
+         *
+         * b) if iterate_supers encounters a super block that has not finished
+         *    ->mount yet, it waits until it is finished
+         *
+         * c) cleancache_init_fs is called from ->mount and
+         *    cleancache_invalidate_fs is called from ->kill_sb
+         *
+         * d) we call iterate_supers after cleancache_ops has been set
+         *
+         * From a) it follows that if iterate_supers skips a super block, then
+         * either the super block is already dead, in which case we do not need
+         * to bother initializing cleancache for it, or it was mounted after we
+         * initiated iterate_supers. In the latter case, it must have seen
+         * cleancache_ops set according to d) and initialized cleancache from
+         * ->mount by itself according to c). This proves that we call
+         * ->init_fs at least once for each active super block.
+         *
+         * From b) and c) it follows that if iterate_supers encounters a super
+         * block that has already started ->init_fs, it will wait until ->mount
+         * and hence ->init_fs has finished, then check cleancache_poolid, see
+         * that it has already been set and therefore do nothing. This proves
+         * that we call ->init_fs no more than once for each super block.
+         *
+         * Combined together, the last two paragraphs prove the function
+         * correctness.
+         *
+         * Note that various cleancache callbacks may proceed before this
+         * function is called or even concurrently with it, but since
+         * CLEANCACHE_NO_BACKEND is negative, they will all result in a noop
+         * until the corresponding ->init_fs has been actually called and
+         * cleancache_ops has been set.
         */
-        barrier();
-        cleancache_ops = ops;
-        mutex_unlock(&poolid_mutex);
+        iterate_supers(cleancache_register_ops_sb, NULL);
         return 0;
 }
 EXPORT_SYMBOL(cleancache_register_ops);
@@ -139,42 +113,28 @@ EXPORT_SYMBOL(cleancache_register_ops);
 /* Called by a cleancache-enabled filesystem at time of mount */
 void __cleancache_init_fs(struct super_block *sb)
 {
-        int i;
+        int pool_id = CLEANCACHE_NO_BACKEND;
 
-        mutex_lock(&poolid_mutex);
-        for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
-                if (fs_poolid_map[i] == FS_UNKNOWN) {
-                        sb->cleancache_poolid = i + FAKE_FS_POOLID_OFFSET;
-                        if (cleancache_ops)
-                                fs_poolid_map[i] = cleancache_ops->init_fs(PAGE_SIZE);
-                        else
-                                fs_poolid_map[i] = FS_NO_BACKEND;
-                        break;
-                }
+        if (cleancache_ops) {
+                pool_id = cleancache_ops->init_fs(PAGE_SIZE);
+                if (pool_id < 0)
+                        pool_id = CLEANCACHE_NO_POOL;
         }
-        mutex_unlock(&poolid_mutex);
+        sb->cleancache_poolid = pool_id;
 }
 EXPORT_SYMBOL(__cleancache_init_fs);
 
 /* Called by a cleancache-enabled clustered filesystem at time of mount */
 void __cleancache_init_shared_fs(struct super_block *sb)
 {
-        int i;
+        int pool_id = CLEANCACHE_NO_BACKEND_SHARED;
 
-        mutex_lock(&poolid_mutex);
-        for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
-                if (shared_fs_poolid_map[i] == FS_UNKNOWN) {
-                        sb->cleancache_poolid = i + FAKE_SHARED_FS_POOLID_OFFSET;
-                        uuids[i] = sb->s_uuid;
-                        if (cleancache_ops)
-                                shared_fs_poolid_map[i] = cleancache_ops->init_shared_fs
-                                                (sb->s_uuid, PAGE_SIZE);
-                        else
-                                shared_fs_poolid_map[i] = FS_NO_BACKEND;
-                        break;
-                }
+        if (cleancache_ops) {
+                pool_id = cleancache_ops->init_shared_fs(sb->s_uuid, PAGE_SIZE);
+                if (pool_id < 0)
+                        pool_id = CLEANCACHE_NO_POOL;
         }
-        mutex_unlock(&poolid_mutex);
+        sb->cleancache_poolid = pool_id;
 }
 EXPORT_SYMBOL(__cleancache_init_shared_fs);
 
@@ -204,19 +164,6 @@ static int cleancache_get_key(struct inode *inode,
 }
 
 /*
- * Returns a pool_id that is associated with a given fake poolid.
- */
-static int get_poolid_from_fake(int fake_pool_id)
-{
-        if (fake_pool_id >= FAKE_SHARED_FS_POOLID_OFFSET)
-                return shared_fs_poolid_map[fake_pool_id -
-                        FAKE_SHARED_FS_POOLID_OFFSET];
-        else if (fake_pool_id >= FAKE_FS_POOLID_OFFSET)
-                return fs_poolid_map[fake_pool_id - FAKE_FS_POOLID_OFFSET];
-        return FS_NO_BACKEND;
-}
-
-/*
  * "Get" data from cleancache associated with the poolid/inode/index
  * that were specified when the data was put to cleancache and, if
  * successful, use it to fill the specified page with data and return 0.
@@ -231,7 +178,6 @@ int __cleancache_get_page(struct page *page)
 {
         int ret = -1;
         int pool_id;
-        int fake_pool_id;
         struct cleancache_filekey key = { .u.key = { 0 } };
 
         if (!cleancache_ops) {
@@ -240,17 +186,14 @@ int __cleancache_get_page(struct page *page)
         }
 
         VM_BUG_ON_PAGE(!PageLocked(page), page);
-        fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
-        if (fake_pool_id < 0)
+        pool_id = page->mapping->host->i_sb->cleancache_poolid;
+        if (pool_id < 0)
                 goto out;
-        pool_id = get_poolid_from_fake(fake_pool_id);
 
         if (cleancache_get_key(page->mapping->host, &key) < 0)
                 goto out;
 
-        if (pool_id >= 0)
-                ret = cleancache_ops->get_page(pool_id,
-                                key, page->index, page);
+        ret = cleancache_ops->get_page(pool_id, key, page->index, page);
         if (ret == 0)
                 cleancache_succ_gets++;
         else
@@ -273,7 +216,6 @@ EXPORT_SYMBOL(__cleancache_get_page);
 void __cleancache_put_page(struct page *page)
 {
         int pool_id;
-        int fake_pool_id;
         struct cleancache_filekey key = { .u.key = { 0 } };
 
         if (!cleancache_ops) {
@@ -282,12 +224,7 @@ void __cleancache_put_page(struct page *page)
         }
 
         VM_BUG_ON_PAGE(!PageLocked(page), page);
-        fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
-        if (fake_pool_id < 0)
-                return;
-
-        pool_id = get_poolid_from_fake(fake_pool_id);
-
+        pool_id = page->mapping->host->i_sb->cleancache_poolid;
         if (pool_id >= 0 &&
                 cleancache_get_key(page->mapping->host, &key) >= 0) {
                 cleancache_ops->put_page(pool_id, key, page->index, page);
@@ -308,18 +245,13 @@ void __cleancache_invalidate_page(struct address_space *mapping,
                                   struct page *page)
 {
         /* careful... page->mapping is NULL sometimes when this is called */
-        int pool_id;
-        int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
+        int pool_id = mapping->host->i_sb->cleancache_poolid;
         struct cleancache_filekey key = { .u.key = { 0 } };
 
         if (!cleancache_ops)
                 return;
 
-        if (fake_pool_id >= 0) {
-                pool_id = get_poolid_from_fake(fake_pool_id);
-                if (pool_id < 0)
-                        return;
-
+        if (pool_id >= 0) {
                 VM_BUG_ON_PAGE(!PageLocked(page), page);
                 if (cleancache_get_key(mapping->host, &key) >= 0) {
                         cleancache_ops->invalidate_page(pool_id,
@@ -341,18 +273,12 @@ EXPORT_SYMBOL(__cleancache_invalidate_page);
  */
 void __cleancache_invalidate_inode(struct address_space *mapping)
 {
-        int pool_id;
-        int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
+        int pool_id = mapping->host->i_sb->cleancache_poolid;
         struct cleancache_filekey key = { .u.key = { 0 } };
 
         if (!cleancache_ops)
                 return;
 
-        if (fake_pool_id < 0)
-                return;
-
-        pool_id = get_poolid_from_fake(fake_pool_id);
-
         if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
                 cleancache_ops->invalidate_inode(pool_id, key);
 }
@@ -365,32 +291,18 @@ EXPORT_SYMBOL(__cleancache_invalidate_inode);
  */
 void __cleancache_invalidate_fs(struct super_block *sb)
 {
-        int index;
-        int fake_pool_id = sb->cleancache_poolid;
-        int old_poolid = fake_pool_id;
+        int pool_id;
 
-        mutex_lock(&poolid_mutex);
-        if (fake_pool_id >= FAKE_SHARED_FS_POOLID_OFFSET) {
-                index = fake_pool_id - FAKE_SHARED_FS_POOLID_OFFSET;
-                old_poolid = shared_fs_poolid_map[index];
-                shared_fs_poolid_map[index] = FS_UNKNOWN;
-                uuids[index] = NULL;
-        } else if (fake_pool_id >= FAKE_FS_POOLID_OFFSET) {
-                index = fake_pool_id - FAKE_FS_POOLID_OFFSET;
-                old_poolid = fs_poolid_map[index];
-                fs_poolid_map[index] = FS_UNKNOWN;
-        }
-        sb->cleancache_poolid = -1;
-        if (cleancache_ops)
-                cleancache_ops->invalidate_fs(old_poolid);
-        mutex_unlock(&poolid_mutex);
+        pool_id = sb->cleancache_poolid;
+        sb->cleancache_poolid = CLEANCACHE_NO_POOL;
+
+        if (cleancache_ops && pool_id >= 0)
+                cleancache_ops->invalidate_fs(pool_id);
 }
 EXPORT_SYMBOL(__cleancache_invalidate_fs);
 
 static int __init init_cleancache(void)
 {
-        int i;
-
 #ifdef CONFIG_DEBUG_FS
         struct dentry *root = debugfs_create_dir("cleancache", NULL);
         if (root == NULL)
@@ -402,10 +314,6 @@ static int __init init_cleancache(void)
         debugfs_create_u64("invalidates", S_IRUGO,
                                 root, &cleancache_invalidates);
 #endif
-        for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
-                fs_poolid_map[i] = FS_UNKNOWN;
-                shared_fs_poolid_map[i] = FS_UNKNOWN;
-        }
         return 0;
 }
 module_init(init_cleancache)