-rw-r--r--  drivers/staging/zcache/zcache-main.c  |  8
-rw-r--r--  drivers/xen/tmem.c                    |  6
-rw-r--r--  include/linux/cleancache.h            |  2
-rw-r--r--  mm/cleancache.c                       | 62
4 files changed, 40 insertions, 38 deletions
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 09c69c8026f9..6bd4ebb3494d 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -1576,9 +1576,9 @@ static struct cleancache_ops zcache_cleancache_ops = {
         .init_fs = zcache_cleancache_init_fs
 };
 
-struct cleancache_ops zcache_cleancache_register_ops(void)
+struct cleancache_ops *zcache_cleancache_register_ops(void)
 {
-        struct cleancache_ops old_ops =
+        struct cleancache_ops *old_ops =
                 cleancache_register_ops(&zcache_cleancache_ops);
 
         return old_ops;
@@ -1860,7 +1860,7 @@ static int __init zcache_init(void)
         }
         zbud_init();
         if (zcache_enabled && !disable_cleancache) {
-                struct cleancache_ops old_ops;
+                struct cleancache_ops *old_ops;
 
                 register_shrinker(&zcache_shrinker);
                 old_ops = zcache_cleancache_register_ops();
@@ -1870,7 +1870,7 @@ static int __init zcache_init(void)
                 pr_info("%s: cleancache: ignorenonactive = %d\n",
                         namestr, !disable_cleancache_ignore_nonactive);
 #endif
-                if (old_ops.init_fs != NULL)
+                if (old_ops != NULL)
                         pr_warn("%s: cleancache_ops overridden\n", namestr);
         }
         if (zcache_enabled && !disable_frontswap) {
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index 7a01a5fd0f63..fd79eab08368 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -236,7 +236,7 @@ static int __init no_cleancache(char *s)
 }
 __setup("nocleancache", no_cleancache);
 
-static struct cleancache_ops __initdata tmem_cleancache_ops = {
+static struct cleancache_ops tmem_cleancache_ops = {
         .put_page = tmem_cleancache_put_page,
         .get_page = tmem_cleancache_get_page,
         .invalidate_page = tmem_cleancache_flush_page,
@@ -392,9 +392,9 @@ static int __init xen_tmem_init(void)
         BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
         if (tmem_enabled && use_cleancache) {
                 char *s = "";
-                struct cleancache_ops old_ops =
+                struct cleancache_ops *old_ops =
                         cleancache_register_ops(&tmem_cleancache_ops);
-                if (old_ops.init_fs != NULL)
+                if (old_ops)
                         s = " (WARNING: cleancache_ops overridden)";
                 printk(KERN_INFO "cleancache enabled, RAM provided by "
                        "Xen Transcendent Memory%s\n", s);
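The dropped __initdata annotation on tmem_cleancache_ops follows from the API change: the core no longer copies the structure into its own static instance but keeps the pointer handed to cleancache_register_ops(), so the backend's ops table has to stay resident after init. A minimal sketch of the two lifetimes (illustrative only, not part of the patch):

/* Old API: the core copied the caller's table, so a backend could mark
 * it __initdata and let it be discarded once initcalls finished. */
static struct cleancache_ops __initdata tmem_cleancache_ops = { /* ... */ };

/* New API: the core stores only the pointer passed to
 * cleancache_register_ops() and dereferences it on every hook, so the
 * table must live for as long as the backend is registered. */
static struct cleancache_ops tmem_cleancache_ops = { /* ... */ };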
diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
index 42e55deee757..3af5ea839558 100644
--- a/include/linux/cleancache.h
+++ b/include/linux/cleancache.h
@@ -33,7 +33,7 @@ struct cleancache_ops {
         void (*invalidate_fs)(int);
 };
 
-extern struct cleancache_ops
+extern struct cleancache_ops *
         cleancache_register_ops(struct cleancache_ops *ops);
 extern void __cleancache_init_fs(struct super_block *);
 extern void __cleancache_init_shared_fs(char *, struct super_block *);
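With the new prototype a backend receives the previously registered ops as a pointer (NULL when it is the first backend) instead of a zero-filled struct copy. A minimal registration sketch, assuming a hypothetical backend whose callbacks (example_init_fs() and friends) are defined elsewhere; this is illustrative code, not part of the patch:

#include <linux/cleancache.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

/* Hypothetical backend ops table; the callbacks are assumed to exist. */
static struct cleancache_ops example_cleancache_ops = {
        .init_fs          = example_init_fs,
        .init_shared_fs   = example_init_shared_fs,
        .get_page         = example_get_page,
        .put_page         = example_put_page,
        .invalidate_page  = example_invalidate_page,
        .invalidate_inode = example_invalidate_inode,
        .invalidate_fs    = example_invalidate_fs,
};

static int __init example_backend_init(void)
{
        /* NULL now means "no backend was registered before us". */
        struct cleancache_ops *old_ops =
                cleancache_register_ops(&example_cleancache_ops);

        if (old_ops)
                pr_warn("example: cleancache_ops overridden\n");
        return 0;
}
module_init(example_backend_init);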
diff --git a/mm/cleancache.c b/mm/cleancache.c
index 0cecdbba4bcd..b3ae19b72035 100644
--- a/mm/cleancache.c
+++ b/mm/cleancache.c
@@ -32,7 +32,7 @@ EXPORT_SYMBOL(cleancache_enabled);
  * cleancache_ops is set by cleancache_ops_register to contain the pointers
  * to the cleancache "backend" implementation functions.
  */
-static struct cleancache_ops cleancache_ops __read_mostly;
+static struct cleancache_ops *cleancache_ops __read_mostly;
 
 /*
  * Counters available via /sys/kernel/debug/frontswap (if debugfs is
@@ -72,15 +72,14 @@ static DEFINE_MUTEX(poolid_mutex);
 /*
  * When set to false (default) all calls to the cleancache functions, except
  * the __cleancache_invalidate_fs and __cleancache_init_[shared|]fs are guarded
- * by the if (!backend_registered) return. This means multiple threads (from
- * different filesystems) will be checking backend_registered. The usage of a
+ * by the if (!cleancache_ops) return. This means multiple threads (from
+ * different filesystems) will be checking cleancache_ops. The usage of a
  * bool instead of a atomic_t or a bool guarded by a spinlock is OK - we are
  * OK if the time between the backend's have been initialized (and
- * backend_registered has been set to true) and when the filesystems start
+ * cleancache_ops has been set to not NULL) and when the filesystems start
  * actually calling the backends. The inverse (when unloading) is obviously
  * not good - but this shim does not do that (yet).
  */
-static bool backend_registered __read_mostly;
 
 /*
  * The backends and filesystems work all asynchronously. This is b/c the
@@ -90,13 +89,13 @@ static bool backend_registered __read_mostly;
  * [shared_|]fs_poolid_map and uuids for.
  *
  * b). user does I/Os -> we call the rest of __cleancache_* functions
- *      which return immediately as backend_registered is false.
+ *      which return immediately as cleancache_ops is false.
  *
  * c). modprobe zcache -> cleancache_register_ops. We init the backend
- *      and set backend_registered to true, and for any fs_poolid_map
+ *      and set cleancache_ops to true, and for any fs_poolid_map
  *      (which is set by __cleancache_init_fs) we initialize the poolid.
  *
- * d). user does I/Os -> now that backend_registered is true all the
+ * d). user does I/Os -> now that cleancache_ops is true all the
  *      __cleancache_* functions can call the backend. They all check
  *      that fs_poolid_map is valid and if so invoke the backend.
  *
@@ -120,23 +119,26 @@ static bool backend_registered __read_mostly;
  * Register operations for cleancache, returning previous thus allowing
  * detection of multiple backends and possible nesting.
  */
-struct cleancache_ops cleancache_register_ops(struct cleancache_ops *ops)
+struct cleancache_ops *cleancache_register_ops(struct cleancache_ops *ops)
 {
-        struct cleancache_ops old = cleancache_ops;
+        struct cleancache_ops *old = cleancache_ops;
         int i;
 
         mutex_lock(&poolid_mutex);
-        cleancache_ops = *ops;
-
-        backend_registered = true;
         for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
                 if (fs_poolid_map[i] == FS_NO_BACKEND)
-                        fs_poolid_map[i] = (*cleancache_ops.init_fs)(PAGE_SIZE);
+                        fs_poolid_map[i] = ops->init_fs(PAGE_SIZE);
                 if (shared_fs_poolid_map[i] == FS_NO_BACKEND)
-                        shared_fs_poolid_map[i] = (*cleancache_ops.init_shared_fs)
+                        shared_fs_poolid_map[i] = ops->init_shared_fs
                                         (uuids[i], PAGE_SIZE);
         }
-out:
+        /*
+         * We MUST set cleancache_ops _after_ we have called the backends
+         * init_fs or init_shared_fs functions. Otherwise the compiler might
+         * re-order where cleancache_ops is set in this function.
+         */
+        barrier();
+        cleancache_ops = ops;
         mutex_unlock(&poolid_mutex);
         return old;
 }
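The hunk above is the heart of the change: every pending pool is initialized through the caller's ops first, and only then is the cleancache_ops pointer published, with barrier() keeping the compiler from hoisting the publish ahead of that work. A standalone userspace analogue of the publish-after-init pattern (names are illustrative; it models only the compiler-ordering concern the comment mentions, not SMP memory ordering):

#include <stdio.h>

struct ops {
        void (*put_page)(int pool_id);
};

/* NULL means "no backend registered yet", just like cleancache_ops. */
static struct ops *registered_ops;

/* Compiler-only barrier, same effect as the kernel's barrier(). */
#define compiler_barrier() asm volatile("" ::: "memory")

static void demo_put_page(int pool_id)
{
        printf("put_page(pool %d)\n", pool_id);
}

static struct ops demo_ops = { .put_page = demo_put_page };

static void register_backend(struct ops *ops)
{
        /* 1. finish any initialization a reader might depend on (elided) */
        /* 2. keep the store below from being reordered above step 1 */
        compiler_barrier();
        /* 3. publish: readers may now observe a non-NULL pointer */
        registered_ops = ops;
}

static void hook(void)
{
        if (!registered_ops)            /* nothing published yet: bail out */
                return;
        registered_ops->put_page(0);
}

int main(void)
{
        hook();                         /* no-op before registration */
        register_backend(&demo_ops);
        hook();                         /* now reaches the backend */
        return 0;
}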
@@ -151,8 +153,8 @@ void __cleancache_init_fs(struct super_block *sb)
         for (i = 0; i < MAX_INITIALIZABLE_FS; i++) {
                 if (fs_poolid_map[i] == FS_UNKNOWN) {
                         sb->cleancache_poolid = i + FAKE_FS_POOLID_OFFSET;
-                        if (backend_registered)
-                                fs_poolid_map[i] = (*cleancache_ops.init_fs)(PAGE_SIZE);
+                        if (cleancache_ops)
+                                fs_poolid_map[i] = cleancache_ops->init_fs(PAGE_SIZE);
                         else
                                 fs_poolid_map[i] = FS_NO_BACKEND;
                         break;
@@ -172,8 +174,8 @@ void __cleancache_init_shared_fs(char *uuid, struct super_block *sb)
                 if (shared_fs_poolid_map[i] == FS_UNKNOWN) {
                         sb->cleancache_poolid = i + FAKE_SHARED_FS_POOLID_OFFSET;
                         uuids[i] = uuid;
-                        if (backend_registered)
-                                shared_fs_poolid_map[i] = (*cleancache_ops.init_shared_fs)
+                        if (cleancache_ops)
+                                shared_fs_poolid_map[i] = cleancache_ops->init_shared_fs
                                                 (uuid, PAGE_SIZE);
                         else
                                 shared_fs_poolid_map[i] = FS_NO_BACKEND;
@@ -240,7 +242,7 @@ int __cleancache_get_page(struct page *page)
         int fake_pool_id;
         struct cleancache_filekey key = { .u.key = { 0 } };
 
-        if (!backend_registered) {
+        if (!cleancache_ops) {
                 cleancache_failed_gets++;
                 goto out;
         }
@@ -255,7 +257,7 @@ int __cleancache_get_page(struct page *page)
                 goto out;
 
         if (pool_id >= 0)
-                ret = (*cleancache_ops.get_page)(pool_id,
+                ret = cleancache_ops->get_page(pool_id,
                                 key, page->index, page);
         if (ret == 0)
                 cleancache_succ_gets++;
@@ -282,7 +284,7 @@ void __cleancache_put_page(struct page *page)
         int fake_pool_id;
         struct cleancache_filekey key = { .u.key = { 0 } };
 
-        if (!backend_registered) {
+        if (!cleancache_ops) {
                 cleancache_puts++;
                 return;
         }
@@ -296,7 +298,7 @@ void __cleancache_put_page(struct page *page)
 
         if (pool_id >= 0 &&
                 cleancache_get_key(page->mapping->host, &key) >= 0) {
-                (*cleancache_ops.put_page)(pool_id, key, page->index, page);
+                cleancache_ops->put_page(pool_id, key, page->index, page);
                 cleancache_puts++;
         }
 }
@@ -318,7 +320,7 @@ void __cleancache_invalidate_page(struct address_space *mapping,
         int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
         struct cleancache_filekey key = { .u.key = { 0 } };
 
-        if (!backend_registered)
+        if (!cleancache_ops)
                 return;
 
         if (fake_pool_id >= 0) {
@@ -328,7 +330,7 @@ void __cleancache_invalidate_page(struct address_space *mapping,
 
                 VM_BUG_ON(!PageLocked(page));
                 if (cleancache_get_key(mapping->host, &key) >= 0) {
-                        (*cleancache_ops.invalidate_page)(pool_id,
+                        cleancache_ops->invalidate_page(pool_id,
                                         key, page->index);
                         cleancache_invalidates++;
                 }
@@ -351,7 +353,7 @@ void __cleancache_invalidate_inode(struct address_space *mapping)
         int fake_pool_id = mapping->host->i_sb->cleancache_poolid;
         struct cleancache_filekey key = { .u.key = { 0 } };
 
-        if (!backend_registered)
+        if (!cleancache_ops)
                 return;
 
         if (fake_pool_id < 0)
@@ -360,7 +362,7 @@ void __cleancache_invalidate_inode(struct address_space *mapping)
         pool_id = get_poolid_from_fake(fake_pool_id);
 
         if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
-                (*cleancache_ops.invalidate_inode)(pool_id, key);
+                cleancache_ops->invalidate_inode(pool_id, key);
 }
 EXPORT_SYMBOL(__cleancache_invalidate_inode);
 
@@ -387,8 +389,8 @@ void __cleancache_invalidate_fs(struct super_block *sb)
                 fs_poolid_map[index] = FS_UNKNOWN;
         }
         sb->cleancache_poolid = -1;
-        if (backend_registered)
-                (*cleancache_ops.invalidate_fs)(old_poolid);
+        if (cleancache_ops)
+                cleancache_ops->invalidate_fs(old_poolid);
         mutex_unlock(&poolid_mutex);
 }
 EXPORT_SYMBOL(__cleancache_invalidate_fs);
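The deferred-initialization dance described in the a)-d) comment in mm/cleancache.c (filesystems may mount before any backend exists, and cleancache_register_ops() later back-fills their pool ids) can be modelled in a few lines of standalone C. Names, the page size of 4096, and the pool-id scheme below are illustrative only:

#include <stdio.h>
#include <stddef.h>

#define MAX_FS          4
#define FS_UNKNOWN      (-1)
#define FS_NO_BACKEND   (-2)

/* One slot per filesystem, like fs_poolid_map[] in mm/cleancache.c. */
static int fs_poolid_map[MAX_FS] = {
        FS_UNKNOWN, FS_UNKNOWN, FS_UNKNOWN, FS_UNKNOWN
};

/* Stands in for cleancache_ops->init_fs; NULL until a backend registers. */
static int (*backend_init_fs)(size_t);

static int demo_init_fs(size_t pagesize)
{
        static int next_pool;

        (void)pagesize;
        return next_pool++;             /* hand out pool ids */
}

/* Mirrors __cleancache_init_fs(): park the slot if there is no backend. */
static void mount_fs(int i)
{
        fs_poolid_map[i] = backend_init_fs ? backend_init_fs(4096)
                                           : FS_NO_BACKEND;
}

/* Mirrors cleancache_register_ops(): back-fill every parked slot. */
static void register_backend(void)
{
        int i;

        backend_init_fs = demo_init_fs;
        for (i = 0; i < MAX_FS; i++)
                if (fs_poolid_map[i] == FS_NO_BACKEND)
                        fs_poolid_map[i] = backend_init_fs(4096);
}

int main(void)
{
        mount_fs(0);            /* a) a filesystem mounts before any backend */
        register_backend();     /* c) the backend loads and claims the slot */
        mount_fs(1);            /* a filesystem mounted afterwards */
        printf("pool ids: %d %d\n", fs_poolid_map[0], fs_poolid_map[1]);
        return 0;
}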