author     Dan Magenheimer <dan.magenheimer@oracle.com>   2013-04-30 18:26:56 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org> 2013-04-30 20:04:01 -0400
commit     49a9ab815acb8379a2f5fd43abe40038821e8f87 (patch)
tree       965226ce0574ce3906ba433bfcb11463c56eff24 /mm
parent     4f89849da22db9d0edb378acea65e23fcd546173 (diff)
mm: cleancache: lazy initialization to allow tmem backends to build/run as modules
With the goal of allowing tmem backends (zcache, ramster, Xen tmem) to
be built/loaded as modules rather than built in and enabled by a boot
parameter, this patch provides "lazy initialization", allowing backends
to register with cleancache even after filesystems have been mounted.
Calls to init_fs and init_shared_fs are remembered as fake poolids, but
no real tmem_pools are created. On backend registration the fake poolids
are mapped to real poolids and their respective tmem_pools.
Signed-off-by: Stefan Hengelein <ilendir@googlemail.com>
Signed-off-by: Florian Schmaus <fschmaus@gmail.com>
Signed-off-by: Andor Daam <andor.daam@googlemail.com>
Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
[v1: Minor fixes: used #define for some values and bools]
[v2: Removed CLEANCACHE_HAS_LAZY_INIT]
[v3: Added more comments, added a lock for [shared_|]fs_poolid_map]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Bob Liu <lliubbo@gmail.com>
Cc: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--   mm/cleancache.c | 240
 1 file changed, 219 insertions(+), 21 deletions(-)
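The mechanism is small enough to model outside the kernel. Below is a minimal, userspace-only sketch of the bookkeeping this patch adds: a mount gets a fake poolid immediately, the deferred init_fs calls are replayed once a backend registers, and every later operation translates fake poolid to real poolid. The constant and function names mirror the patch, but the harness itself (fake_backend_init_fs, main, the single non-shared map, the absence of locking) is a hypothetical simplification, not kernel code.

/* Standalone model of cleancache's lazy initialization (illustrative only). */
#include <stdio.h>

#define MAX_INITIALIZABLE_FS    32
#define FAKE_FS_POOLID_OFFSET   1000
#define FS_NO_BACKEND           (-1)
#define FS_UNKNOWN              (-2)

static int fs_poolid_map[MAX_INITIALIZABLE_FS];
static int backend_registered;
static int next_real_poolid;

/* Hypothetical stand-in for a tmem backend's init_fs(PAGE_SIZE). */
static int fake_backend_init_fs(void)
{
        return next_real_poolid++;
}

/* Mount time: hand out a fake poolid; create a real pool only if we can. */
static int init_fs(void)
{
        int i;

        for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
                if (fs_poolid_map[i] == FS_UNKNOWN) {
                        fs_poolid_map[i] = backend_registered ?
                                fake_backend_init_fs() : FS_NO_BACKEND;
                        return i + FAKE_FS_POOLID_OFFSET; /* sb->cleancache_poolid */
                }
        }
        return -1;      /* no free slot */
}

/* Backend registration: replay every remembered init_fs call. */
static void register_backend(void)
{
        int i;

        backend_registered = 1;
        for (i = 0; i < MAX_INITIALIZABLE_FS; i++)
                if (fs_poolid_map[i] == FS_NO_BACKEND)
                        fs_poolid_map[i] = fake_backend_init_fs();
}

/* Every get/put/invalidate: translate the fake poolid to the real one. */
static int get_poolid_from_fake(int fake_pool_id)
{
        if (fake_pool_id >= FAKE_FS_POOLID_OFFSET)
                return fs_poolid_map[fake_pool_id - FAKE_FS_POOLID_OFFSET];
        return FS_NO_BACKEND;
}

int main(void)
{
        int i, sb_poolid;

        for (i = 0; i < MAX_INITIALIZABLE_FS; i++)
                fs_poolid_map[i] = FS_UNKNOWN;

        sb_poolid = init_fs();  /* mount before any backend exists */
        printf("fake=%d real=%d\n", sb_poolid, get_poolid_from_fake(sb_poolid));
        register_backend();     /* e.g. modprobe zcache */
        printf("fake=%d real=%d\n", sb_poolid, get_poolid_from_fake(sb_poolid));
        return 0;
}

Running it prints "fake=1000 real=-1" before registration (operations are no-ops) and "fake=1000 real=0" after, which is exactly the state transition the patch performs in cleancache_register_ops.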
diff --git a/mm/cleancache.c b/mm/cleancache.c
index d76ba74be2d0..0cecdbba4bcd 100644
--- a/mm/cleancache.c
+++ b/mm/cleancache.c
@@ -45,15 +45,99 @@ static u64 cleancache_puts;
 static u64 cleancache_invalidates;
 
 /*
- * register operations for cleancache, returning previous thus allowing
- * detection of multiple backends and possible nesting
+ * When no backend is registered all calls to init_fs and init_shared_fs
+ * are registered and fake poolids (FAKE_FS_POOLID_OFFSET or
+ * FAKE_SHARED_FS_POOLID_OFFSET, plus offset in the respective array
+ * [shared_|]fs_poolid_map) are given to the respective super block
+ * (sb->cleancache_poolid) and no tmem_pools are created. When a backend
+ * registers with cleancache the previous calls to init_fs and init_shared_fs
+ * are executed to create tmem_pools and set the respective poolids. While no
+ * backend is registered all "puts", "gets" and "flushes" are ignored or failed.
+ */
+#define MAX_INITIALIZABLE_FS 32
+#define FAKE_FS_POOLID_OFFSET 1000
+#define FAKE_SHARED_FS_POOLID_OFFSET 2000
+
+#define FS_NO_BACKEND (-1)
+#define FS_UNKNOWN (-2)
+static int fs_poolid_map[MAX_INITIALIZABLE_FS];
+static int shared_fs_poolid_map[MAX_INITIALIZABLE_FS];
+static char *uuids[MAX_INITIALIZABLE_FS];
+/*
+ * Mutex for the [shared_|]fs_poolid_map to guard against multiple threads
+ * invoking umount (and ending in __cleancache_invalidate_fs) and also multiple
+ * threads calling mount (and ending up in __cleancache_init_[shared|]fs).
+ */
+static DEFINE_MUTEX(poolid_mutex);
+/*
+ * When set to false (default) all calls to the cleancache functions, except
+ * the __cleancache_invalidate_fs and __cleancache_init_[shared|]fs are guarded
+ * by the if (!backend_registered) return. This means multiple threads (from
+ * different filesystems) will be checking backend_registered. The usage of a
+ * bool instead of a atomic_t or a bool guarded by a spinlock is OK - we are
+ * OK if the time between the backend's have been initialized (and
+ * backend_registered has been set to true) and when the filesystems start
+ * actually calling the backends. The inverse (when unloading) is obviously
+ * not good - but this shim does not do that (yet).
+ */
+static bool backend_registered __read_mostly;
+
+/*
+ * The backends and filesystems work all asynchronously. This is b/c the
+ * backends can be built as modules.
+ * The usual sequence of events is:
+ *      a) mount /      -> __cleancache_init_fs is called. We set the
+ *              [shared_|]fs_poolid_map and uuids for.
+ *
+ *      b). user does I/Os -> we call the rest of __cleancache_* functions
+ *              which return immediately as backend_registered is false.
+ *
+ *      c). modprobe zcache -> cleancache_register_ops. We init the backend
+ *              and set backend_registered to true, and for any fs_poolid_map
+ *              (which is set by __cleancache_init_fs) we initialize the poolid.
+ *
+ *      d). user does I/Os -> now that backend_registered is true all the
+ *              __cleancache_* functions can call the backend. They all check
+ *              that fs_poolid_map is valid and if so invoke the backend.
+ *
+ *      e). umount / -> __cleancache_invalidate_fs, the fs_poolid_map is
+ *              reset (which is the second check in the __cleancache_* ops
+ *              to call the backend).
+ *
+ * The sequence of event could also be c), followed by a), and d). and e). The
+ * c) would not happen anymore. There is also the chance of c), and one thread
+ * doing a) + d), and another doing e). For that case we depend on the
+ * filesystem calling __cleancache_invalidate_fs in the proper sequence (so
+ * that it handles all I/Os before it invalidates the fs (which is last part
+ * of unmounting process).
+ *
+ * Note: The acute reader will notice that there is no "rmmod zcache" case.
+ * This is b/c the functionality for that is not yet implemented and when
+ * done, will require some extra locking not yet devised.
+ */
+
+/*
+ * Register operations for cleancache, returning previous thus allowing
+ * detection of multiple backends and possible nesting.
  */
 struct cleancache_ops cleancache_register_ops(struct cleancache_ops *ops)
 {
         struct cleancache_ops old = cleancache_ops;
+        int i;
 
+        mutex_lock(&poolid_mutex);
         cleancache_ops = *ops;
-        cleancache_enabled = 1;
+
+        backend_registered = true;
+        for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
+                if (fs_poolid_map[i] == FS_NO_BACKEND)
+                        fs_poolid_map[i] = (*cleancache_ops.init_fs)(PAGE_SIZE);
+                if (shared_fs_poolid_map[i] == FS_NO_BACKEND)
+                        shared_fs_poolid_map[i] = (*cleancache_ops.init_shared_fs)
+                                        (uuids[i], PAGE_SIZE);
+        }
+out:
+        mutex_unlock(&poolid_mutex);
         return old;
 }
 EXPORT_SYMBOL(cleancache_register_ops);
@@ -61,15 +145,42 @@ EXPORT_SYMBOL(cleancache_register_ops);
 /* Called by a cleancache-enabled filesystem at time of mount */
 void __cleancache_init_fs(struct super_block *sb)
 {
-        sb->cleancache_poolid = (*cleancache_ops.init_fs)(PAGE_SIZE);
+        int i;
+
+        mutex_lock(&poolid_mutex);
+        for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
+                if (fs_poolid_map[i] == FS_UNKNOWN) {
+                        sb->cleancache_poolid = i + FAKE_FS_POOLID_OFFSET;
+                        if (backend_registered)
+                                fs_poolid_map[i] = (*cleancache_ops.init_fs)(PAGE_SIZE);
+                        else
+                                fs_poolid_map[i] = FS_NO_BACKEND;
+                        break;
+                }
+        }
+        mutex_unlock(&poolid_mutex);
 }
 EXPORT_SYMBOL(__cleancache_init_fs);
 
 /* Called by a cleancache-enabled clustered filesystem at time of mount */
 void __cleancache_init_shared_fs(char *uuid, struct super_block *sb)
 {
-        sb->cleancache_poolid =
-                (*cleancache_ops.init_shared_fs)(uuid, PAGE_SIZE);
+        int i;
+
+        mutex_lock(&poolid_mutex);
+        for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
+                if (shared_fs_poolid_map[i] == FS_UNKNOWN) {
+                        sb->cleancache_poolid = i + FAKE_SHARED_FS_POOLID_OFFSET;
+                        uuids[i] = uuid;
+                        if (backend_registered)
+                                shared_fs_poolid_map[i] = (*cleancache_ops.init_shared_fs)
+                                                (uuid, PAGE_SIZE);
+                        else
+                                shared_fs_poolid_map[i] = FS_NO_BACKEND;
+                        break;
+                }
+        }
+        mutex_unlock(&poolid_mutex);
 }
 EXPORT_SYMBOL(__cleancache_init_shared_fs);
 
@@ -99,27 +210,53 @@ static int cleancache_get_key(struct inode *inode,
 }
 
 /*
+ * Returns a pool_id that is associated with a given fake poolid.
+ */
+static int get_poolid_from_fake(int fake_pool_id)
+{
+        if (fake_pool_id >= FAKE_SHARED_FS_POOLID_OFFSET)
+                return shared_fs_poolid_map[fake_pool_id -
+                        FAKE_SHARED_FS_POOLID_OFFSET];
+        else if (fake_pool_id >= FAKE_FS_POOLID_OFFSET)
+                return fs_poolid_map[fake_pool_id - FAKE_FS_POOLID_OFFSET];
+        return FS_NO_BACKEND;
+}
+
+/*
  * "Get" data from cleancache associated with the poolid/inode/index
  * that were specified when the data was put to cleanache and, if
  * successful, use it to fill the specified page with data and return 0.
  * The pageframe is unchanged and returns -1 if the get fails.
  * Page must be locked by caller.
+ *
+ * The function has two checks before any action is taken - whether
+ * a backend is registered and whether the sb->cleancache_poolid
+ * is correct.
  */
 int __cleancache_get_page(struct page *page)
 {
         int ret = -1;
         int pool_id;
+        int fake_pool_id;
         struct cleancache_filekey key = { .u.key = { 0 } };
 
+        if (!backend_registered) {
+                cleancache_failed_gets++;
+                goto out;
+        }
+
         VM_BUG_ON(!PageLocked(page));
-        pool_id = page->mapping->host->i_sb->cleancache_poolid;
-        if (pool_id < 0)
+        fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
+        if (fake_pool_id < 0)
                 goto out;
+        pool_id = get_poolid_from_fake(fake_pool_id);
 
         if (cleancache_get_key(page->mapping->host, &key) < 0)
                 goto out;
 
-        ret = (*cleancache_ops.get_page)(pool_id, key, page->index, page);
+        if (pool_id >= 0)
+                ret = (*cleancache_ops.get_page)(pool_id,
+                                key, page->index, page);
         if (ret == 0)
                 cleancache_succ_gets++;
         else
@@ -134,16 +271,31 @@ EXPORT_SYMBOL(__cleancache_get_page);
  * (previously-obtained per-filesystem) poolid and the page's,
  * inode and page index. Page must be locked. Note that a put_page
  * always "succeeds", though a subsequent get_page may succeed or fail.
+ *
+ * The function has two checks before any action is taken - whether
+ * a backend is registered and whether the sb->cleancache_poolid
+ * is correct.
  */
 void __cleancache_put_page(struct page *page)
 {
         int pool_id;
+        int fake_pool_id;
         struct cleancache_filekey key = { .u.key = { 0 } };
 
+        if (!backend_registered) {
+                cleancache_puts++;
+                return;
+        }
+
         VM_BUG_ON(!PageLocked(page));
-        pool_id = page->mapping->host->i_sb->cleancache_poolid;
+        fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
+        if (fake_pool_id < 0)
+                return;
+
+        pool_id = get_poolid_from_fake(fake_pool_id);
+
         if (pool_id >= 0 &&
             cleancache_get_key(page->mapping->host, &key) >= 0) {
                 (*cleancache_ops.put_page)(pool_id, key, page->index, page);
                 cleancache_puts++;
         }
@@ -153,19 +305,31 @@ EXPORT_SYMBOL(__cleancache_put_page);
 /*
  * Invalidate any data from cleancache associated with the poolid and the
  * page's inode and page index so that a subsequent "get" will fail.
+ *
+ * The function has two checks before any action is taken - whether
+ * a backend is registered and whether the sb->cleancache_poolid
+ * is correct.
  */
 void __cleancache_invalidate_page(struct address_space *mapping,
                                         struct page *page)
 {
         /* careful... page->mapping is NULL sometimes when this is called */
-        int pool_id = mapping->host->i_sb->cleancache_poolid;
+        int pool_id;
+        int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
         struct cleancache_filekey key = { .u.key = { 0 } };
 
-        if (pool_id >= 0) {
+        if (!backend_registered)
+                return;
+
+        if (fake_pool_id >= 0) {
+                pool_id = get_poolid_from_fake(fake_pool_id);
+                if (pool_id < 0)
+                        return;
+
                 VM_BUG_ON(!PageLocked(page));
                 if (cleancache_get_key(mapping->host, &key) >= 0) {
                         (*cleancache_ops.invalidate_page)(pool_id,
                                         key, page->index);
                         cleancache_invalidates++;
                 }
         }
@@ -176,12 +340,25 @@ EXPORT_SYMBOL(__cleancache_invalidate_page);
  * Invalidate all data from cleancache associated with the poolid and the
  * mappings's inode so that all subsequent gets to this poolid/inode
  * will fail.
+ *
+ * The function has two checks before any action is taken - whether
+ * a backend is registered and whether the sb->cleancache_poolid
+ * is correct.
  */
 void __cleancache_invalidate_inode(struct address_space *mapping)
 {
-        int pool_id = mapping->host->i_sb->cleancache_poolid;
+        int pool_id;
+        int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
         struct cleancache_filekey key = { .u.key = { 0 } };
 
+        if (!backend_registered)
+                return;
+
+        if (fake_pool_id < 0)
+                return;
+
+        pool_id = get_poolid_from_fake(fake_pool_id);
+
         if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
                 (*cleancache_ops.invalidate_inode)(pool_id, key);
 }
@@ -189,21 +366,37 @@ EXPORT_SYMBOL(__cleancache_invalidate_inode);
 
 /*
  * Called by any cleancache-enabled filesystem at time of unmount;
- * note that pool_id is surrendered and may be reutrned by a subsequent
- * cleancache_init_fs or cleancache_init_shared_fs
+ * note that pool_id is surrendered and may be returned by a subsequent
+ * cleancache_init_fs or cleancache_init_shared_fs.
  */
 void __cleancache_invalidate_fs(struct super_block *sb)
 {
-        if (sb->cleancache_poolid >= 0) {
-                int old_poolid = sb->cleancache_poolid;
-                sb->cleancache_poolid = -1;
-                (*cleancache_ops.invalidate_fs)(old_poolid);
+        int index;
+        int fake_pool_id = sb->cleancache_poolid;
+        int old_poolid = fake_pool_id;
+
+        mutex_lock(&poolid_mutex);
+        if (fake_pool_id >= FAKE_SHARED_FS_POOLID_OFFSET) {
+                index = fake_pool_id - FAKE_SHARED_FS_POOLID_OFFSET;
+                old_poolid = shared_fs_poolid_map[index];
+                shared_fs_poolid_map[index] = FS_UNKNOWN;
+                uuids[index] = NULL;
+        } else if (fake_pool_id >= FAKE_FS_POOLID_OFFSET) {
+                index = fake_pool_id - FAKE_FS_POOLID_OFFSET;
+                old_poolid = fs_poolid_map[index];
+                fs_poolid_map[index] = FS_UNKNOWN;
         }
+        sb->cleancache_poolid = -1;
+        if (backend_registered)
+                (*cleancache_ops.invalidate_fs)(old_poolid);
+        mutex_unlock(&poolid_mutex);
 }
 EXPORT_SYMBOL(__cleancache_invalidate_fs);
 
 static int __init init_cleancache(void)
 {
+        int i;
+
 #ifdef CONFIG_DEBUG_FS
         struct dentry *root = debugfs_create_dir("cleancache", NULL);
         if (root == NULL)
@@ -215,6 +408,11 @@ static int __init init_cleancache(void)
         debugfs_create_u64("invalidates", S_IRUGO,
                         root, &cleancache_invalidates);
 #endif
+        for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
+                fs_poolid_map[i] = FS_UNKNOWN;
+                shared_fs_poolid_map[i] = FS_UNKNOWN;
+        }
+        cleancache_enabled = 1;
         return 0;
 }
 module_init(init_cleancache)
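For completeness, the other side of the handshake: roughly what a tmem backend module does to register once it is loaded. This is an illustrative stub, not part of this commit. The cleancache_ops member names are taken from the call sites in the diff above, but the function-pointer signatures are inferred from those calls and should be checked against include/linux/cleancache.h of this era before relying on them.

/*
 * Illustrative backend stub: registers against the shim, possibly long
 * after filesystems have mounted. All stub_* names are hypothetical.
 */
#include <linux/module.h>
#include <linux/cleancache.h>

static int stub_init_fs(size_t pagesize)
{
        return 0;       /* a real backend creates a tmem pool and returns its id */
}

static int stub_init_shared_fs(char *uuid, size_t pagesize)
{
        return 0;
}

static int stub_get_page(int pool, struct cleancache_filekey key,
                         pgoff_t index, struct page *page)
{
        return -1;      /* always miss */
}

static void stub_put_page(int pool, struct cleancache_filekey key,
                          pgoff_t index, struct page *page)
{
}

static void stub_invalidate_page(int pool, struct cleancache_filekey key,
                                 pgoff_t index)
{
}

static void stub_invalidate_inode(int pool, struct cleancache_filekey key)
{
}

static void stub_invalidate_fs(int pool)
{
}

static struct cleancache_ops stub_ops = {
        .init_fs                = stub_init_fs,
        .init_shared_fs         = stub_init_shared_fs,
        .get_page               = stub_get_page,
        .put_page               = stub_put_page,
        .invalidate_page        = stub_invalidate_page,
        .invalidate_inode       = stub_invalidate_inode,
        .invalidate_fs          = stub_invalidate_fs,
};

static int __init stub_init(void)
{
        /*
         * With lazy initialization, this may run well after mounts:
         * cleancache replays the remembered init_fs/init_shared_fs
         * calls before returning the previously registered ops.
         */
        cleancache_register_ops(&stub_ops);
        return 0;
}
module_init(stub_init);
MODULE_LICENSE("GPL");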